xref: /linux/drivers/net/usb/lan78xx.c (revision 41fb0cf1bced59c1fe178cf6cc9f716b5da9e40e)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 
50 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52 					 (FLOW_THRESHOLD(off) << 8))
53 
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS			9216
56 #define FLOW_ON_HS			8704
57 
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS			4096
60 #define FLOW_OFF_HS			1024
61 
62 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY		(0x0800)
64 #define MAX_SINGLE_PACKET_SIZE		(9000)
65 #define DEFAULT_TX_CSUM_ENABLE		(true)
66 #define DEFAULT_RX_CSUM_ENABLE		(true)
67 #define DEFAULT_TSO_CSUM_ENABLE		(true)
68 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
69 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
70 #define TX_ALIGNMENT			(4)
71 #define RXW_PADDING			2
72 
73 #define LAN78XX_USB_VENDOR_ID		(0x0424)
74 #define LAN7800_USB_PRODUCT_ID		(0x7800)
75 #define LAN7850_USB_PRODUCT_ID		(0x7850)
76 #define LAN7801_USB_PRODUCT_ID		(0x7801)
77 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
78 #define LAN78XX_OTP_MAGIC		(0x78F3)
79 
80 #define	MII_READ			1
81 #define	MII_WRITE			0
82 
83 #define EEPROM_INDICATOR		(0xA5)
84 #define EEPROM_MAC_OFFSET		(0x01)
85 #define MAX_EEPROM_SIZE			512
86 #define OTP_INDICATOR_1			(0xF3)
87 #define OTP_INDICATOR_2			(0xF7)
88 
89 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
90 					 WAKE_MCAST | WAKE_BCAST | \
91 					 WAKE_ARP | WAKE_MAGIC)
92 
93 #define LAN78XX_NAPI_WEIGHT		64
94 
95 #define TX_URB_NUM			10
96 #define TX_SS_URB_NUM			TX_URB_NUM
97 #define TX_HS_URB_NUM			TX_URB_NUM
98 #define TX_FS_URB_NUM			TX_URB_NUM
99 
100 /* A single URB buffer must be large enough to hold a complete jumbo packet
101  */
102 #define TX_SS_URB_SIZE			(32 * 1024)
103 #define TX_HS_URB_SIZE			(16 * 1024)
104 #define TX_FS_URB_SIZE			(10 * 1024)
105 
106 #define RX_SS_URB_NUM			30
107 #define RX_HS_URB_NUM			10
108 #define RX_FS_URB_NUM			10
109 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
110 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
111 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
112 
113 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
114 #define SS_BULK_IN_DELAY		0x2000
115 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
116 #define HS_BULK_IN_DELAY		0x2000
117 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
118 #define FS_BULK_IN_DELAY		0x2000
119 
120 #define TX_CMD_LEN			8
121 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
122 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123 
124 #define RX_CMD_LEN			10
125 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
126 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
127 
128 /* USB related defines */
129 #define BULK_IN_PIPE			1
130 #define BULK_OUT_PIPE			2
131 
132 /* default autosuspend delay (mSec)*/
133 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
134 
135 /* statistic update interval (mSec) */
136 #define STAT_UPDATE_TIMER		(1 * 1000)
137 
138 /* time to wait for MAC or FCT to stop (jiffies) */
139 #define HW_DISABLE_TIMEOUT		(HZ / 10)
140 
141 /* time to wait between polling MAC or FCT state (ms) */
142 #define HW_DISABLE_DELAY_MS		1
143 
144 /* defines interrupts from interrupt EP */
145 #define MAX_INT_EP			(32)
146 #define INT_EP_INTEP			(31)
147 #define INT_EP_OTP_WR_DONE		(28)
148 #define INT_EP_EEE_TX_LPI_START		(26)
149 #define INT_EP_EEE_TX_LPI_STOP		(25)
150 #define INT_EP_EEE_RX_LPI		(24)
151 #define INT_EP_MAC_RESET_TIMEOUT	(23)
152 #define INT_EP_RDFO			(22)
153 #define INT_EP_TXE			(21)
154 #define INT_EP_USB_STATUS		(20)
155 #define INT_EP_TX_DIS			(19)
156 #define INT_EP_RX_DIS			(18)
157 #define INT_EP_PHY			(17)
158 #define INT_EP_DP			(16)
159 #define INT_EP_MAC_ERR			(15)
160 #define INT_EP_TDFU			(14)
161 #define INT_EP_TDFO			(13)
162 #define INT_EP_UTX			(12)
163 #define INT_EP_GPIO_11			(11)
164 #define INT_EP_GPIO_10			(10)
165 #define INT_EP_GPIO_9			(9)
166 #define INT_EP_GPIO_8			(8)
167 #define INT_EP_GPIO_7			(7)
168 #define INT_EP_GPIO_6			(6)
169 #define INT_EP_GPIO_5			(5)
170 #define INT_EP_GPIO_4			(4)
171 #define INT_EP_GPIO_3			(3)
172 #define INT_EP_GPIO_2			(2)
173 #define INT_EP_GPIO_1			(1)
174 #define INT_EP_GPIO_0			(0)
175 
/* ethtool statistics names.  The entry order must match, field for field,
 * struct lan78xx_statstage / lan78xx_statstage64 below.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
225 
/* Raw 32-bit hardware statistics block as returned by the
 * USB_VENDOR_REQUEST_GET_STATS control transfer (see lan78xx_read_stats).
 * Counters are free-running and wrap; rollover is detected by
 * lan78xx_check_stat_rollover().  Field order must match lan78xx_gstrings.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
275 
/* 64-bit accumulated view of struct lan78xx_statstage: each field holds
 * raw_counter + rollovers * (rollover_max + 1), maintained by
 * lan78xx_update_stats().  Field order must stay identical to the 32-bit
 * struct because the two are walked as parallel flat arrays.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
325 
/* Register addresses dumped for diagnostics — presumably consumed by the
 * ethtool get_regs path; confirm against the driver's ethtool_ops.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
347 
348 #define PHY_REG_SIZE (32 * sizeof(u32))
349 
350 struct lan78xx_net;
351 
/* Driver-private state hung off lan78xx_net (receive filtering, VLAN and
 * wake-on-LAN configuration).
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;	/* cached copy of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;	/* WAKE_* option flags — presumably ethtool wolopts; verify */
};
364 
/* Lifecycle state of a URB-backed skb, tracked in skb_data.state. */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
374 
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB bound to this skb for its lifetime */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;		/* payload length for the transfer */
	int num_of_packet;	/* frames batched into this URB buffer */
};
382 
/* Context attached to an asynchronous control-request URB. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
387 
388 #define EVENT_TX_HALT			0
389 #define EVENT_RX_HALT			1
390 #define EVENT_RX_MEMORY			2
391 #define EVENT_STS_SPLIT			3
392 #define EVENT_LINK_RESET		4
393 #define EVENT_RX_PAUSED			5
394 #define EVENT_DEV_WAKING		6
395 #define EVENT_DEV_ASLEEP		7
396 #define EVENT_DEV_OPEN			8
397 #define EVENT_STAT_UPDATE		9
398 #define EVENT_DEV_DISCONNECT		10
399 
/* Statistics bookkeeping: raw snapshot plus per-counter rollover state
 * used to maintain the accumulated 64-bit view (see lan78xx_update_stats).
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* last raw snapshot read */
	struct lan78xx_statstage	rollover_count;	/* wraps seen per counter */
	struct lan78xx_statstage	rollover_max;	/* per-counter max value before wrap */
	struct lan78xx_statstage64	curr_stat;	/* accumulated 64-bit totals */
};
407 
/* State for the chained IRQ domain that demuxes the device's interrupt
 * endpoint bits (INT_EP_*) into virtual IRQs (e.g. for the PHY).
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* virq mapped for INT_EP_PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* cached interrupt enable mask */
	struct mutex		irq_lock;		/* for irq bus access */
};
416 
/* Per-device driver state shared by the netdev, USB and PHY layers. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* points at struct lan78xx_priv */

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	struct sk_buff_head	rxq_free;	/* free Rx buffer pool */
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;	/* free Tx buffer pool */
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		phy_mutex; /* for phy access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;	/* periodic EVENT_STAT_UPDATE trigger */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
479 
480 /* define external phy id */
481 #define	PHY_LAN8835			(0x0007C130)
482 #define	PHY_KSZ9031RNX			(0x00221620)
483 
484 /* use ethtool to change the level for any given device */
485 static int msg_level = -1;
486 module_param(msg_level, int, 0);
487 MODULE_PARM_DESC(msg_level, "Override default message level");
488 
489 static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
490 {
491 	if (skb_queue_empty(buf_pool))
492 		return NULL;
493 
494 	return skb_dequeue(buf_pool);
495 }
496 
497 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
498 				struct sk_buff *buf)
499 {
500 	buf->data = buf->head;
501 	skb_reset_tail_pointer(buf);
502 
503 	buf->len = 0;
504 	buf->data_len = 0;
505 
506 	skb_queue_tail(buf_pool, buf);
507 }
508 
509 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
510 {
511 	struct skb_data *entry;
512 	struct sk_buff *buf;
513 
514 	while (!skb_queue_empty(buf_pool)) {
515 		buf = skb_dequeue(buf_pool);
516 		if (buf) {
517 			entry = (struct skb_data *)buf->cb;
518 			usb_free_urb(entry->urb);
519 			dev_kfree_skb_any(buf);
520 		}
521 	}
522 }
523 
524 static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
525 				  size_t n_urbs, size_t urb_size,
526 				  struct lan78xx_net *dev)
527 {
528 	struct skb_data *entry;
529 	struct sk_buff *buf;
530 	struct urb *urb;
531 	int i;
532 
533 	skb_queue_head_init(buf_pool);
534 
535 	for (i = 0; i < n_urbs; i++) {
536 		buf = alloc_skb(urb_size, GFP_ATOMIC);
537 		if (!buf)
538 			goto error;
539 
540 		if (skb_linearize(buf) != 0) {
541 			dev_kfree_skb_any(buf);
542 			goto error;
543 		}
544 
545 		urb = usb_alloc_urb(0, GFP_ATOMIC);
546 		if (!urb) {
547 			dev_kfree_skb_any(buf);
548 			goto error;
549 		}
550 
551 		entry = (struct skb_data *)buf->cb;
552 		entry->urb = urb;
553 		entry->dev = dev;
554 		entry->length = 0;
555 		entry->num_of_packet = 0;
556 
557 		skb_queue_tail(buf_pool, buf);
558 	}
559 
560 	return 0;
561 
562 error:
563 	lan78xx_free_buf_pool(buf_pool);
564 
565 	return -ENOMEM;
566 }
567 
/* Fetch a free Rx buffer from the per-device pool (NULL if exhausted). */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}
572 
/* Return an Rx buffer to the free pool after resetting it. */
static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}
578 
/* Free all buffers/URBs in the Rx free pool. */
static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}
583 
/* Allocate the Rx free pool sized by the link-speed-dependent URB count/size. */
static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
589 
/* Fetch a free Tx buffer from the per-device pool (NULL if exhausted). */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}
594 
/* Return a Tx buffer to the free pool after resetting it. */
static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}
600 
/* Free all buffers/URBs in the Tx free pool. */
static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}
605 
/* Allocate the Tx free pool sized by the link-speed-dependent URB count/size. */
static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
611 
/* Read a 32-bit device register over the USB control pipe.
 *
 * A kmalloc'd bounce buffer is used because usb_control_msg() requires a
 * DMA-able buffer (stack memory is not).  Returns the usb_control_msg()
 * result (>= 0 on success, negative errno on failure); *data is only
 * written on success.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	/* Refuse to touch the hardware once the device is gone. */
	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* Registers are little-endian on the wire. */
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
641 
642 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
643 {
644 	u32 *buf;
645 	int ret;
646 
647 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
648 		return -ENODEV;
649 
650 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
651 	if (!buf)
652 		return -ENOMEM;
653 
654 	*buf = data;
655 	cpu_to_le32s(buf);
656 
657 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
658 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
659 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
660 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
661 	if (unlikely(ret < 0) &&
662 	    net_ratelimit()) {
663 		netdev_warn(dev->net,
664 			    "Failed to write register index 0x%08x. ret = %d",
665 			    index, ret);
666 	}
667 
668 	kfree(buf);
669 
670 	return ret;
671 }
672 
673 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
674 			      u32 data)
675 {
676 	int ret;
677 	u32 buf;
678 
679 	ret = lan78xx_read_reg(dev, reg, &buf);
680 	if (ret < 0)
681 		return ret;
682 
683 	buf &= ~mask;
684 	buf |= (mask & data);
685 
686 	ret = lan78xx_write_reg(dev, reg, buf);
687 	if (ret < 0)
688 		return ret;
689 
690 	return 0;
691 }
692 
693 static int lan78xx_read_stats(struct lan78xx_net *dev,
694 			      struct lan78xx_statstage *data)
695 {
696 	int ret = 0;
697 	int i;
698 	struct lan78xx_statstage *stats;
699 	u32 *src;
700 	u32 *dst;
701 
702 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
703 	if (!stats)
704 		return -ENOMEM;
705 
706 	ret = usb_control_msg(dev->udev,
707 			      usb_rcvctrlpipe(dev->udev, 0),
708 			      USB_VENDOR_REQUEST_GET_STATS,
709 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
710 			      0,
711 			      0,
712 			      (void *)stats,
713 			      sizeof(*stats),
714 			      USB_CTRL_SET_TIMEOUT);
715 	if (likely(ret >= 0)) {
716 		src = (u32 *)stats;
717 		dst = (u32 *)data;
718 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
719 			le32_to_cpus(&src[i]);
720 			dst[i] = src[i];
721 		}
722 	} else {
723 		netdev_warn(dev->net,
724 			    "Failed to read stat ret = %d", ret);
725 	}
726 
727 	kfree(stats);
728 
729 	return ret;
730 }
731 
/* If the freshly read counter is smaller than the saved snapshot the
 * 32-bit hardware counter must have wrapped; bump its rollover count.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
737 
/* Detect wrap-around for every hardware counter by comparing the fresh
 * reading in @stats against the previously saved snapshot, then save
 * @stats as the new snapshot for the next comparison.  Called with
 * dev->stats.access_lock held (see lan78xx_update_stats).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
791 
/* Poll the hardware counters and refresh the accumulated 64-bit stats.
 *
 * The statstage structs are treated as parallel flat arrays of counters
 * (their field layouts are identical by construction), so the update is
 * done index-by-index:  curr = raw + rollovers * (max + 1).
 * Silently returns if the interface cannot be resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	/* Alias the structs as counter arrays for the loop below. */
	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* usb_control_msg() returns bytes transferred, hence the > 0 test.
	 * On failure the stale counters below still get re-accumulated.
	 */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
819 
820 /* Loop until the read is completed with timeout called with phy_mutex held */
821 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
822 {
823 	unsigned long start_time = jiffies;
824 	u32 val;
825 	int ret;
826 
827 	do {
828 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
829 		if (unlikely(ret < 0))
830 			return -EIO;
831 
832 		if (!(val & MII_ACC_MII_BUSY_))
833 			return 0;
834 	} while (!time_after(jiffies, start_time + HZ));
835 
836 	return -EIO;
837 }
838 
839 static inline u32 mii_access(int id, int index, int read)
840 {
841 	u32 ret;
842 
843 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
844 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
845 	if (read)
846 		ret |= MII_ACC_MII_READ_;
847 	else
848 		ret |= MII_ACC_MII_WRITE_;
849 	ret |= MII_ACC_MII_BUSY_;
850 
851 	return ret;
852 }
853 
854 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
855 {
856 	unsigned long start_time = jiffies;
857 	u32 val;
858 	int ret;
859 
860 	do {
861 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
862 		if (unlikely(ret < 0))
863 			return -EIO;
864 
865 		if (!(val & E2P_CMD_EPC_BUSY_) ||
866 		    (val & E2P_CMD_EPC_TIMEOUT_))
867 			break;
868 		usleep_range(40, 100);
869 	} while (!time_after(jiffies, start_time + HZ));
870 
871 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
872 		netdev_warn(dev->net, "EEPROM read operation timeout");
873 		return -EIO;
874 	}
875 
876 	return 0;
877 }
878 
879 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
880 {
881 	unsigned long start_time = jiffies;
882 	u32 val;
883 	int ret;
884 
885 	do {
886 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
887 		if (unlikely(ret < 0))
888 			return -EIO;
889 
890 		if (!(val & E2P_CMD_EPC_BUSY_))
891 			return 0;
892 
893 		usleep_range(40, 100);
894 	} while (!time_after(jiffies, start_time + HZ));
895 
896 	netdev_warn(dev->net, "EEPROM is busy");
897 	return -EIO;
898 }
899 
900 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
901 				   u32 length, u8 *data)
902 {
903 	u32 val;
904 	u32 saved;
905 	int i, ret;
906 	int retval;
907 
908 	/* depends on chip, some EEPROM pins are muxed with LED function.
909 	 * disable & restore LED function to access EEPROM.
910 	 */
911 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
912 	saved = val;
913 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
914 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
915 		ret = lan78xx_write_reg(dev, HW_CFG, val);
916 	}
917 
918 	retval = lan78xx_eeprom_confirm_not_busy(dev);
919 	if (retval)
920 		return retval;
921 
922 	for (i = 0; i < length; i++) {
923 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
924 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
925 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
926 		if (unlikely(ret < 0)) {
927 			retval = -EIO;
928 			goto exit;
929 		}
930 
931 		retval = lan78xx_wait_eeprom(dev);
932 		if (retval < 0)
933 			goto exit;
934 
935 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
936 		if (unlikely(ret < 0)) {
937 			retval = -EIO;
938 			goto exit;
939 		}
940 
941 		data[i] = val & 0xFF;
942 		offset++;
943 	}
944 
945 	retval = 0;
946 exit:
947 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
948 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
949 
950 	return retval;
951 }
952 
953 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
954 			       u32 length, u8 *data)
955 {
956 	u8 sig;
957 	int ret;
958 
959 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
960 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
961 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
962 	else
963 		ret = -EINVAL;
964 
965 	return ret;
966 }
967 
/* Write @length bytes from @data to the EEPROM starting at @offset.
 * Returns 0 on success or a negative errno.
 *
 * Sequence per the controller's command interface: enable write/erase
 * (EWEN) once, then for each byte load E2P_DATA and issue a WRITE
 * command, waiting for completion after every step.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	/* NOTE(review): ret is not checked here; if the read fails, 'saved'
	 * (and the masked write below) uses an indeterminate value — confirm.
	 */
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* Restore the LED configuration saved at entry (all paths). */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
1034 
/* Read @length bytes from the OTP array starting at @offset into @data.
 * Returns 0 on success, -EIO on a polling timeout.
 *
 * The OTP block must be powered up first (PWRDN_N cleared), then each
 * byte is fetched with an ADDR1/ADDR2 + READ + GO command sequence,
 * polling OTP_STATUS for completion.
 * NOTE(review): the lan78xx_read_reg/lan78xx_write_reg return values in
 * this function are not checked; a failed read leaves 'buf' stale —
 * confirm whether that is acceptable here.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* Address is split: high bits in ADDR1, low bits in ADDR2. */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1087 
/* Program @length bytes from @data into the OTP array at @offset.
 * Returns 0 on success, -EIO on a polling timeout.
 *
 * Mirrors lan78xx_read_raw_otp: power up the OTP block, select BYTE
 * program mode, then program each byte and poll OTP_STATUS.
 * NOTE(review): the command issued per byte is OTP_TST_CMD_PRGVRFY_
 * (via OTP_TST_CMD) rather than a plain program via OTP_FUNC_CMD —
 * presumably program-and-verify; confirm against the LAN78xx datasheet.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* Address is split: high bits in ADDR1, low bits in ADDR2. */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1139 
1140 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1141 			    u32 length, u8 *data)
1142 {
1143 	u8 sig;
1144 	int ret;
1145 
1146 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1147 
1148 	if (ret == 0) {
1149 		if (sig == OTP_INDICATOR_2)
1150 			offset += 0x100;
1151 		else if (sig != OTP_INDICATOR_1)
1152 			ret = -EINVAL;
1153 		if (!ret)
1154 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1155 	}
1156 
1157 	return ret;
1158 }
1159 
1160 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1161 {
1162 	int i, ret;
1163 
1164 	for (i = 0; i < 100; i++) {
1165 		u32 dp_sel;
1166 
1167 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1168 		if (unlikely(ret < 0))
1169 			return -EIO;
1170 
1171 		if (dp_sel & DP_SEL_DPRDY_)
1172 			return 0;
1173 
1174 		usleep_range(40, 100);
1175 	}
1176 
1177 	netdev_warn(dev->net, "%s timed out", __func__);
1178 
1179 	return -EIO;
1180 }
1181 
/* Write @length words from @buf into the internal data port RAM
 * selected by @ram_select, starting at word address @addr.
 *
 * Serializes against other data port users via pdata->dataport_mutex
 * and waits for the port to be idle before and after each word.
 *
 * NOTE(review): a failed usb_autopm_get_interface() returns 0 here,
 * silently skipping the write — confirm this is intentional. Several
 * intermediate lan78xx_read_reg/write_reg return codes are also
 * overwritten without being checked.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1222 
1223 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1224 				    int index, u8 addr[ETH_ALEN])
1225 {
1226 	u32 temp;
1227 
1228 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1229 		temp = addr[3];
1230 		temp = addr[2] | (temp << 8);
1231 		temp = addr[1] | (temp << 8);
1232 		temp = addr[0] | (temp << 8);
1233 		pdata->pfilter_table[index][1] = temp;
1234 		temp = addr[5];
1235 		temp = addr[4] | (temp << 8);
1236 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1237 		pdata->pfilter_table[index][0] = temp;
1238 	}
1239 }
1240 
1241 /* returns hash bit number for given MAC address */
1242 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1243 {
1244 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1245 }
1246 
/* Work-item body: push the shadow multicast state built by
 * lan78xx_set_multicast() into the hardware. Runs in process context
 * because register access over USB may sleep.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* upload the multicast hash table through the data port */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* entry 0 is the device's own address; write MAF_HI(i) to 0
	 * first so the filter is invalid while MAF_LO is updated, then
	 * write the (possibly VALID-flagged) high word last
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1270 
/* ndo_set_rx_mode handler: rebuild the receive-filter shadow state
 * (RFE_CTL flags, perfect-filter table, multicast hash table) from the
 * netdev's flags and multicast list.
 *
 * This can be called in atomic context, so only the in-memory shadow
 * copies are updated here (under rfe_ctl_lock); the actual USB register
 * writes are deferred to lan78xx_deferred_multicast_write().
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: drop unicast/multicast modes */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast reception is always on */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;	/* note: intentionally shadows the outer i */

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow entries go into the hash filter */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1334 
/* Program MAC flow-control (pause) settings after a link comes up.
 *
 * The pause capability is taken either from autonegotiation results
 * (@lcladv/@rmtadv) or from the user-requested fc_request_control.
 * FCT_FLOW thresholds depend on the USB link speed; they are left at 0
 * for full-speed links.
 *
 * Always returns 0; register write errors are not propagated.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* 0xFFFF is the pause time written into transmitted pause frames */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1368 
1369 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1370 
/* Reset the MAC by setting MAC_CR_RST_ and polling until the hardware
 * clears it again (up to one second).
 *
 * Holds phy_mutex for the whole operation: resetting while an MDIO
 * transaction is in flight can lock up the MAC interface (see comment
 * below). Returns 0 on success, a negative register-access error, or
 * -ETIMEDOUT if the reset bit never clears.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}
1416 
/* Handle a PHY link-state change (invoked from the deferred kevent
 * path after a PHY interrupt).
 *
 * On link down: reset the MAC and stop the statistics timer.
 * On link up: tune USB3 U1/U2 power states for the negotiated speed,
 * program flow control from the advertisement registers, restart the
 * statistics timer, and resubmit RX URBs / kick NAPI.
 *
 * Returns 0 on success or a negative error from register/PHY access.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	/* sample link state under the PHY lock for a consistent view */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		/* read local and partner advertisements to resolve pause */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		napi_schedule(&dev->napi);
	}

	return 0;
}
1509 
1510 /* some work can't be done in tasklets, so we use keventd
1511  *
1512  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1513  * but tasklet_schedule() doesn't.	hope the failure is rare.
1514  */
1515 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1516 {
1517 	set_bit(work, &dev->flags);
1518 	if (!schedule_delayed_work(&dev->wq, 0))
1519 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1520 }
1521 
/* Interrupt-endpoint URB completion handler: decode the 4-byte status
 * word. A PHY interrupt schedules a deferred link reset and, if an IRQ
 * domain mapping exists, re-dispatches the event as a generic IRQ so
 * the phylib interrupt machinery runs.
 */
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	/* status word arrives little-endian and may be unaligned */
	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			/* generic_handle_irq() must run with IRQs off */
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}
1548 
/* ethtool get_eeprom_len: the reported size is fixed at the maximum
 * EEPROM the chip supports.
 */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1553 
/* ethtool get_eeprom: read @ee->len bytes at @ee->offset into @data.
 * Takes a runtime-PM reference for the duration of the USB access.
 * Returns 0 on success or a negative error.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1572 
1573 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1574 				      struct ethtool_eeprom *ee, u8 *data)
1575 {
1576 	struct lan78xx_net *dev = netdev_priv(netdev);
1577 	int ret;
1578 
1579 	ret = usb_autopm_get_interface(dev->intf);
1580 	if (ret)
1581 		return ret;
1582 
1583 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1584 	 * to load data from EEPROM
1585 	 */
1586 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1587 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1588 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1589 		 (ee->offset == 0) &&
1590 		 (ee->len == 512) &&
1591 		 (data[0] == OTP_INDICATOR_1))
1592 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1593 
1594 	usb_autopm_put_interface(dev->intf);
1595 
1596 	return ret;
1597 }
1598 
1599 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1600 				u8 *data)
1601 {
1602 	if (stringset == ETH_SS_STATS)
1603 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1604 }
1605 
1606 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1607 {
1608 	if (sset == ETH_SS_STATS)
1609 		return ARRAY_SIZE(lan78xx_gstrings);
1610 	else
1611 		return -EOPNOTSUPP;
1612 }
1613 
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy
 * the current snapshot out under the stats lock so readers never see a
 * half-updated set.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1625 
/* ethtool get_wol: report Wake-on-LAN capability and current settings.
 * WoL is only offered when the remote-wakeup bit is set in USB_CFG0;
 * otherwise (or on a register-read failure) nothing is supported.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1653 
1654 static int lan78xx_set_wol(struct net_device *netdev,
1655 			   struct ethtool_wolinfo *wol)
1656 {
1657 	struct lan78xx_net *dev = netdev_priv(netdev);
1658 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1659 	int ret;
1660 
1661 	ret = usb_autopm_get_interface(dev->intf);
1662 	if (ret < 0)
1663 		return ret;
1664 
1665 	if (wol->wolopts & ~WAKE_ALL)
1666 		return -EINVAL;
1667 
1668 	pdata->wol = wol->wolopts;
1669 
1670 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1671 
1672 	phy_ethtool_set_wol(netdev->phydev, wol);
1673 
1674 	usb_autopm_put_interface(dev->intf);
1675 
1676 	return ret;
1677 }
1678 
/* ethtool get_eee: combine the PHY's EEE advertisement state with the
 * MAC's EEE enable bit and LPI request delay register.
 *
 * EEE_TX_LPI_REQ_DLY and edata->tx_lpi_timer share the same microsecond
 * unit, so the register value is reported directly.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* EEE is active only if both sides advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1716 
/* ethtool set_eee: enable or disable Energy-Efficient Ethernet in the
 * MAC and forward the advertisement change to the PHY. When enabling,
 * the LPI request delay register is programmed from tx_lpi_timer
 * (microsecond units).
 *
 * NOTE(review): intermediate register-access return codes are stored
 * in ret but never checked, and the function always returns 0 — confirm
 * whether errors should be propagated.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1746 
1747 static u32 lan78xx_get_link(struct net_device *net)
1748 {
1749 	u32 link;
1750 
1751 	mutex_lock(&net->phydev->lock);
1752 	phy_read_status(net->phydev);
1753 	link = net->phydev->link;
1754 	mutex_unlock(&net->phydev->lock);
1755 
1756 	return link;
1757 }
1758 
1759 static void lan78xx_get_drvinfo(struct net_device *net,
1760 				struct ethtool_drvinfo *info)
1761 {
1762 	struct lan78xx_net *dev = netdev_priv(net);
1763 
1764 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1765 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1766 }
1767 
/* ethtool get_msglevel: return the netif message-enable bitmask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1774 
/* ethtool set_msglevel: set the netif message-enable bitmask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1781 
1782 static int lan78xx_get_link_ksettings(struct net_device *net,
1783 				      struct ethtool_link_ksettings *cmd)
1784 {
1785 	struct lan78xx_net *dev = netdev_priv(net);
1786 	struct phy_device *phydev = net->phydev;
1787 	int ret;
1788 
1789 	ret = usb_autopm_get_interface(dev->intf);
1790 	if (ret < 0)
1791 		return ret;
1792 
1793 	phy_ethtool_ksettings_get(phydev, cmd);
1794 
1795 	usb_autopm_put_interface(dev->intf);
1796 
1797 	return ret;
1798 }
1799 
/* ethtool set_link_ksettings: apply speed/duplex/autoneg via the PHY.
 *
 * For forced (non-autoneg) modes, the link is briefly bounced by
 * toggling BMCR_LOOPBACK — loopback drops the link, so restoring BMCR
 * forces the partner to renegotiate at the newly forced settings.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1827 
1828 static void lan78xx_get_pause(struct net_device *net,
1829 			      struct ethtool_pauseparam *pause)
1830 {
1831 	struct lan78xx_net *dev = netdev_priv(net);
1832 	struct phy_device *phydev = net->phydev;
1833 	struct ethtool_link_ksettings ecmd;
1834 
1835 	phy_ethtool_ksettings_get(phydev, &ecmd);
1836 
1837 	pause->autoneg = dev->fc_autoneg;
1838 
1839 	if (dev->fc_request_control & FLOW_CTRL_TX)
1840 		pause->tx_pause = 1;
1841 
1842 	if (dev->fc_request_control & FLOW_CTRL_RX)
1843 		pause->rx_pause = 1;
1844 }
1845 
/* ethtool set_pauseparam: record the requested pause configuration.
 *
 * Requesting pause autonegotiation while link autoneg is disabled is
 * rejected. When autoneg is active, the pause bits in the PHY's
 * advertisement are rewritten to match the request and renegotiation
 * is triggered; otherwise the new settings take effect at the next
 * link-up via lan78xx_update_flowcontrol().
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* clear the old pause bits, then OR in the new ones */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1890 
1891 static int lan78xx_get_regs_len(struct net_device *netdev)
1892 {
1893 	if (!netdev->phydev)
1894 		return (sizeof(lan78xx_regs));
1895 	else
1896 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1897 }
1898 
/* ethtool get_regs: dump the MAC registers listed in lan78xx_regs[],
 * followed by PHY registers 0-31 when a PHY is attached. The layout
 * matches lan78xx_get_regs_len().
 *
 * NOTE(review): unlike the other ethtool ops here, this does not take
 * a usb_autopm reference before register access — confirm the core
 * guarantees the device is resumed on this path.
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}
1918 
/* ethtool operations table; wired up at netdev registration time. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1943 
/* Establish the device MAC address at init time.
 *
 * Preference order: the address already programmed in RX_ADDRL/H, then
 * platform/Device Tree, then EEPROM/OTP, then a random address. If the
 * hardware registers did not hold a valid address, the chosen one is
 * written back to them. In all cases MAF slot 0 (the device's own
 * perfect-filter entry) and the netdev are updated.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack the little-endian register pair into byte order */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* program MAF slot 0 so the MAC accepts frames to its own address */
	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	eth_hw_addr_set(dev->net, addr);
}
1992 
/* MDIO read and write wrappers for phylib */

/* Read PHY register @idx of PHY @phy_id through the MAC's MII bridge.
 * Serialized by phy_mutex; the device is resumed for the duration.
 * Returns the 16-bit register value on success or a negative error.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* on success, return the 16-bit register value */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
2029 
2030 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2031 				 u16 regval)
2032 {
2033 	struct lan78xx_net *dev = bus->priv;
2034 	u32 val, addr;
2035 	int ret;
2036 
2037 	ret = usb_autopm_get_interface(dev->intf);
2038 	if (ret < 0)
2039 		return ret;
2040 
2041 	mutex_lock(&dev->phy_mutex);
2042 
2043 	/* confirm MII not busy */
2044 	ret = lan78xx_phy_wait_not_busy(dev);
2045 	if (ret < 0)
2046 		goto done;
2047 
2048 	val = (u32)regval;
2049 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2050 
2051 	/* set the address, index & direction (write to PHY) */
2052 	addr = mii_access(phy_id, idx, MII_WRITE);
2053 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2054 
2055 	ret = lan78xx_phy_wait_not_busy(dev);
2056 	if (ret < 0)
2057 		goto done;
2058 
2059 done:
2060 	mutex_unlock(&dev->phy_mutex);
2061 	usb_autopm_put_interface(dev->intf);
2062 	return 0;
2063 }
2064 
/* Allocate and register the driver's MDIO bus.
 *
 * The PHY scan mask depends on the chip: 7800/7850 have an internal
 * PHY at address 1; 7801 uses an external PHY strapped on PHYAD[2..0].
 * An optional "mdio" DT child node is passed to of_mdiobus_register()
 * so DT-described PHYs are bound correctly.
 *
 * Returns 0 on success or a negative error (the bus is freed on
 * registration failure).
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* bus id must be unique per device: use USB bus/device numbers */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
2111 
/* Unregister and free the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2117 
/* phylib link-change callback.
 *
 * Applies a PHY errata workaround only: nothing is done unless the PHY
 * is in forced 100 Mb/s mode (see comment below). PHY interrupts are
 * masked around the speed bounce so the intermediate transitions do
 * not generate spurious link events.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
2149 
/* irq_domain .map callback: bind our irqchip, handler and per-domain
 * data to the newly created virtual IRQ.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
2161 
/* irq_domain .unmap callback: detach chip, handler and data from the
 * virtual IRQ (reverse of irq_map()).
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2167 
/* irq_domain callbacks for the device's interrupt-endpoint IRQs */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2172 
/* irqchip .irq_mask: clear the bit in the shadow enable mask; the
 * hardware register is written later in irq_bus_sync_unlock.
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
2179 
/* irqchip .irq_unmask: set the bit in the shadow enable mask; the
 * hardware register is written later in irq_bus_sync_unlock.
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
2186 
/* irqchip .irq_bus_lock: serialize shadow-mask updates until the
 * matching irq_bus_sync_unlock flushes them to hardware.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2193 
/* irqchip .irq_bus_sync_unlock: write the shadow enable mask to
 * INT_EP_CTL if it changed, then release the bus lock taken in
 * lan78xx_irq_bus_lock().
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic contex.
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
2210 
/* irqchip for the interrupt-endpoint based domain; uses the slow-bus
 * (bus_lock/bus_sync_unlock) pattern because register access sleeps.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2218 
2219 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2220 {
2221 	struct device_node *of_node;
2222 	struct irq_domain *irqdomain;
2223 	unsigned int irqmap = 0;
2224 	u32 buf;
2225 	int ret = 0;
2226 
2227 	of_node = dev->udev->dev.parent->of_node;
2228 
2229 	mutex_init(&dev->domain_data.irq_lock);
2230 
2231 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2232 	dev->domain_data.irqenable = buf;
2233 
2234 	dev->domain_data.irqchip = &lan78xx_irqchip;
2235 	dev->domain_data.irq_handler = handle_simple_irq;
2236 
2237 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2238 					  &chip_domain_ops, &dev->domain_data);
2239 	if (irqdomain) {
2240 		/* create mapping for PHY interrupt */
2241 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2242 		if (!irqmap) {
2243 			irq_domain_remove(irqdomain);
2244 
2245 			irqdomain = NULL;
2246 			ret = -EINVAL;
2247 		}
2248 	} else {
2249 		ret = -EINVAL;
2250 	}
2251 
2252 	dev->domain_data.irqdomain = irqdomain;
2253 	dev->domain_data.phyirq = irqmap;
2254 
2255 	return ret;
2256 }
2257 
2258 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2259 {
2260 	if (dev->domain_data.phyirq > 0) {
2261 		irq_dispose_mapping(dev->domain_data.phyirq);
2262 
2263 		if (dev->domain_data.irqdomain)
2264 			irq_domain_remove(dev->domain_data.irqdomain);
2265 	}
2266 	dev->domain_data.phyirq = 0;
2267 	dev->domain_data.irqdomain = NULL;
2268 }
2269 
/* PHY fixup for an external LAN8835: select IRQ_N pin mode, enable the
 * MAC-side RGMII TXC delay and tune the TX DLL, then record the RGMII
 * TX-internal-delay interface mode. Returns 1 (fixup applied).
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	lan78xx_write_reg(dev, MAC_RGMII_ID,
			  MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2292 
/* PHY fixup for an external Micrel KSZ9031RNX: program RGMII pad skew
 * registers (MMD device 2) and record the RGMII RX-internal-delay
 * interface mode. Returns 1 (fixup applied).
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2309 
2310 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2311 {
2312 	u32 buf;
2313 	int ret;
2314 	struct fixed_phy_status fphy_status = {
2315 		.link = 1,
2316 		.speed = SPEED_1000,
2317 		.duplex = DUPLEX_FULL,
2318 	};
2319 	struct phy_device *phydev;
2320 
2321 	phydev = phy_find_first(dev->mdiobus);
2322 	if (!phydev) {
2323 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2324 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2325 		if (IS_ERR(phydev)) {
2326 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2327 			return NULL;
2328 		}
2329 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2330 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2331 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2332 					MAC_RGMII_ID_TXC_DELAY_EN_);
2333 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2334 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2335 		buf |= HW_CFG_CLK125_EN_;
2336 		buf |= HW_CFG_REFCLK25_EN_;
2337 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2338 	} else {
2339 		if (!phydev->drv) {
2340 			netdev_err(dev->net, "no PHY driver found\n");
2341 			return NULL;
2342 		}
2343 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2344 		/* external PHY fixup for KSZ9031RNX */
2345 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2346 						 ksz9031rnx_fixup);
2347 		if (ret < 0) {
2348 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2349 			return NULL;
2350 		}
2351 		/* external PHY fixup for LAN8835 */
2352 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2353 						 lan8835_fixup);
2354 		if (ret < 0) {
2355 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2356 			return NULL;
2357 		}
2358 		/* add more external PHY fixup here if needed */
2359 
2360 		phydev->is_internal = false;
2361 	}
2362 	return phydev;
2363 }
2364 
/* Locate, connect and configure the PHY for the detected chip:
 * LAN7801 uses lan7801_phy_init() (external or fixed PHY over RGMII),
 * LAN7800/LAN7850 use the internal GMII PHY. Also wires up the PHY
 * interrupt (or polling), advertises flow control, and programs LED
 * enables from the optional "microchip,led-modes" DT property.
 *
 * Returns 0 on success or -EIO on any failure.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo whatever lan7801_phy_init() registered */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			/* enable one LED per listed mode (len entries) */
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2467 
2468 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2469 {
2470 	u32 buf;
2471 	bool rxenabled;
2472 
2473 	lan78xx_read_reg(dev, MAC_RX, &buf);
2474 
2475 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2476 
2477 	if (rxenabled) {
2478 		buf &= ~MAC_RX_RXEN_;
2479 		lan78xx_write_reg(dev, MAC_RX, buf);
2480 	}
2481 
2482 	/* add 4 to size for FCS */
2483 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2484 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2485 
2486 	lan78xx_write_reg(dev, MAC_RX, buf);
2487 
2488 	if (rxenabled) {
2489 		buf |= MAC_RX_RXEN_;
2490 		lan78xx_write_reg(dev, MAC_RX, buf);
2491 	}
2492 
2493 	return 0;
2494 }
2495 
/* Asynchronously unlink every URB on queue 'q' that is not already
 * being unlinked. The queue lock must be dropped around
 * usb_unlink_urb() (which may sleep/complete inline), so the walk is
 * restarted from the head after each unlink.
 *
 * Returns the number of URBs successfully submitted for unlinking.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2540 
2541 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2542 {
2543 	struct lan78xx_net *dev = netdev_priv(netdev);
2544 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2545 	int ret;
2546 
2547 	/* no second zero-length packet read wanted after mtu-sized packets */
2548 	if ((max_frame_len % dev->maxpacket) == 0)
2549 		return -EDOM;
2550 
2551 	ret = usb_autopm_get_interface(dev->intf);
2552 	if (ret < 0)
2553 		return ret;
2554 
2555 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2556 	if (!ret)
2557 		netdev->mtu = new_mtu;
2558 
2559 	usb_autopm_put_interface(dev->intf);
2560 
2561 	return ret;
2562 }
2563 
2564 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2565 {
2566 	struct lan78xx_net *dev = netdev_priv(netdev);
2567 	struct sockaddr *addr = p;
2568 	u32 addr_lo, addr_hi;
2569 
2570 	if (netif_running(netdev))
2571 		return -EBUSY;
2572 
2573 	if (!is_valid_ether_addr(addr->sa_data))
2574 		return -EADDRNOTAVAIL;
2575 
2576 	eth_hw_addr_set(netdev, addr->sa_data);
2577 
2578 	addr_lo = netdev->dev_addr[0] |
2579 		  netdev->dev_addr[1] << 8 |
2580 		  netdev->dev_addr[2] << 16 |
2581 		  netdev->dev_addr[3] << 24;
2582 	addr_hi = netdev->dev_addr[4] |
2583 		  netdev->dev_addr[5] << 8;
2584 
2585 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2586 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2587 
2588 	/* Added to support MAC address changes */
2589 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2590 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2591 
2592 	return 0;
2593 }
2594 
2595 /* Enable or disable Rx checksum offload engine */
2596 static int lan78xx_set_features(struct net_device *netdev,
2597 				netdev_features_t features)
2598 {
2599 	struct lan78xx_net *dev = netdev_priv(netdev);
2600 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2601 	unsigned long flags;
2602 
2603 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2604 
2605 	if (features & NETIF_F_RXCSUM) {
2606 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2607 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2608 	} else {
2609 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2610 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2611 	}
2612 
2613 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2614 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2615 	else
2616 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2617 
2618 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2619 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2620 	else
2621 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2622 
2623 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2624 
2625 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2626 
2627 	return 0;
2628 }
2629 
/* Worker that flushes the in-memory VLAN filter table to the device's
 * VLAN dataport; scheduled from the (atomic) add/kill vid callbacks.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2639 
2640 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2641 				   __be16 proto, u16 vid)
2642 {
2643 	struct lan78xx_net *dev = netdev_priv(netdev);
2644 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2645 	u16 vid_bit_index;
2646 	u16 vid_dword_index;
2647 
2648 	vid_dword_index = (vid >> 5) & 0x7F;
2649 	vid_bit_index = vid & 0x1F;
2650 
2651 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2652 
2653 	/* defer register writes to a sleepable context */
2654 	schedule_work(&pdata->set_vlan);
2655 
2656 	return 0;
2657 }
2658 
2659 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2660 				    __be16 proto, u16 vid)
2661 {
2662 	struct lan78xx_net *dev = netdev_priv(netdev);
2663 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2664 	u16 vid_bit_index;
2665 	u16 vid_dword_index;
2666 
2667 	vid_dword_index = (vid >> 5) & 0x7F;
2668 	vid_bit_index = vid & 0x1F;
2669 
2670 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2671 
2672 	/* defer register writes to a sleepable context */
2673 	schedule_work(&pdata->set_vlan);
2674 
2675 	return 0;
2676 }
2677 
2678 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2679 {
2680 	int ret;
2681 	u32 buf;
2682 	u32 regs[6] = { 0 };
2683 
2684 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2685 	if (buf & USB_CFG1_LTM_ENABLE_) {
2686 		u8 temp[2];
2687 		/* Get values from EEPROM first */
2688 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2689 			if (temp[0] == 24) {
2690 				ret = lan78xx_read_raw_eeprom(dev,
2691 							      temp[1] * 2,
2692 							      24,
2693 							      (u8 *)regs);
2694 				if (ret < 0)
2695 					return;
2696 			}
2697 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2698 			if (temp[0] == 24) {
2699 				ret = lan78xx_read_raw_otp(dev,
2700 							   temp[1] * 2,
2701 							   24,
2702 							   (u8 *)regs);
2703 				if (ret < 0)
2704 					return;
2705 			}
2706 		}
2707 	}
2708 
2709 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2710 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2711 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2712 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2713 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2714 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2715 }
2716 
2717 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2718 {
2719 	int result = 0;
2720 
2721 	switch (dev->udev->speed) {
2722 	case USB_SPEED_SUPER:
2723 		dev->rx_urb_size = RX_SS_URB_SIZE;
2724 		dev->tx_urb_size = TX_SS_URB_SIZE;
2725 		dev->n_rx_urbs = RX_SS_URB_NUM;
2726 		dev->n_tx_urbs = TX_SS_URB_NUM;
2727 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2728 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2729 		break;
2730 	case USB_SPEED_HIGH:
2731 		dev->rx_urb_size = RX_HS_URB_SIZE;
2732 		dev->tx_urb_size = TX_HS_URB_SIZE;
2733 		dev->n_rx_urbs = RX_HS_URB_NUM;
2734 		dev->n_tx_urbs = TX_HS_URB_NUM;
2735 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2736 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2737 		break;
2738 	case USB_SPEED_FULL:
2739 		dev->rx_urb_size = RX_FS_URB_SIZE;
2740 		dev->tx_urb_size = TX_FS_URB_SIZE;
2741 		dev->n_rx_urbs = RX_FS_URB_NUM;
2742 		dev->n_tx_urbs = TX_FS_URB_NUM;
2743 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2744 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2745 		break;
2746 	default:
2747 		netdev_warn(dev->net, "USB bus speed not supported\n");
2748 		result = -EIO;
2749 		break;
2750 	}
2751 
2752 	return result;
2753 }
2754 
/* Enable a h/w block by setting its enable bit(s) in 'reg'. */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
2759 
/* Disable a h/w block: clear 'hw_enabled' in 'reg' (if set) and poll
 * until the block reports 'hw_disabled' or HW_DISABLE_TIMEOUT elapses.
 *
 * Returns 0 when stopped, -ETIME on timeout, or a register access
 * error code.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		/* poll for the disabled status bit */
		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do  {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	ret = stopped ? 0 : -ETIME;

	return ret;
}
2799 
/* Flush a FIFO by setting its self-clearing flush/reset bit in 'reg'. */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
2804 
2805 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2806 {
2807 	int ret;
2808 
2809 	netif_dbg(dev, drv, dev->net, "start tx path");
2810 
2811 	/* Start the MAC transmitter */
2812 
2813 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2814 	if (ret < 0)
2815 		return ret;
2816 
2817 	/* Start the Tx FIFO */
2818 
2819 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2820 	if (ret < 0)
2821 		return ret;
2822 
2823 	return 0;
2824 }
2825 
2826 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2827 {
2828 	int ret;
2829 
2830 	netif_dbg(dev, drv, dev->net, "stop tx path");
2831 
2832 	/* Stop the Tx FIFO */
2833 
2834 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2835 	if (ret < 0)
2836 		return ret;
2837 
2838 	/* Stop the MAC transmitter */
2839 
2840 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2841 	if (ret < 0)
2842 		return ret;
2843 
2844 	return 0;
2845 }
2846 
/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
2854 
2855 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2856 {
2857 	int ret;
2858 
2859 	netif_dbg(dev, drv, dev->net, "start rx path");
2860 
2861 	/* Start the Rx FIFO */
2862 
2863 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2864 	if (ret < 0)
2865 		return ret;
2866 
2867 	/* Start the MAC receiver*/
2868 
2869 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2870 	if (ret < 0)
2871 		return ret;
2872 
2873 	return 0;
2874 }
2875 
2876 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2877 {
2878 	int ret;
2879 
2880 	netif_dbg(dev, drv, dev->net, "stop rx path");
2881 
2882 	/* Stop the MAC receiver */
2883 
2884 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2885 	if (ret < 0)
2886 		return ret;
2887 
2888 	/* Stop the Rx FIFO */
2889 
2890 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2891 	if (ret < 0)
2892 		return ret;
2893 
2894 	return 0;
2895 }
2896 
/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
2904 
/* Full device reset and base configuration: lite-reset the chip, read
 * the chip ID/revision, configure USB (NAK on empty IN, burst cap,
 * bulk-in delay, multiple-ethernet-frames), set FIFO sizes, clear
 * interrupts and flow control, restore RFE/checksum/multicast state,
 * reset the PHY, and program the MAC mode and max Rx frame length.
 *
 * Returns 0 on success or a negative error code (-ETIMEDOUT if the
 * lite reset or PHY reset does not complete within ~1s).
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	/* issue a lite reset and poll for its self-clearing bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* URB sizing parameters were chosen in lan78xx_urb_config_init() */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	/* allow multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_MEF_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes (registers hold the end address in 512-byte units) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* clear any pending interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* wait for the PHY reset to clear and the device to become ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3083 
/* Seed the statistics rollover thresholds: most hardware counters are
 * 20 bits wide, so default every field to 0xFFFFF by treating the
 * rollover_max struct as an array of u32, then override the handful of
 * 32-bit counters. Finally request a stats refresh.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	/* these counters are full 32-bit in hardware */
	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	set_bit(EVENT_STAT_UPDATE, &dev->flags);
}
3109 
/* ndo_open handler: start the PHY, submit the interrupt URB for link
 * events, flush and start the Tx/Rx data paths, then enable the queue
 * and NAPI and kick off a deferred link reset.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	/* data paths are stopped here, so flushing the FIFOs is safe */
	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	napi_enable(&dev->napi);

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
3169 
/* Cancel all in-flight Tx/Rx URBs and wait (via dev->wait, woken from
 * the completion paths) until both queues drain, then release any
 * buffers still sitting on the done/overflow/pending queues.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		/* return the buffer to the free pool rather than freeing it */
		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3205 
/* ndo_stop handler: stop the stats timer, quiesce the queue/NAPI,
 * cancel outstanding URBs, stop the data paths and the PHY, then
 * neutralize and cancel deferred work. Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3254 
/* Move 'skb' from 'list' to the rxq_done queue (tagging it with the
 * new state) and schedule NAPI if the done queue was empty. Interrupts
 * stay disabled across the hand-over from list->lock to
 * rxq_done.lock; 'flags' saved on the first lock is restored on the
 * second, which is valid because irqs are never re-enabled in between.
 *
 * Returns the skb's previous state.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	/* only the transition from empty needs a NAPI kick */
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3278 
/* Tx URB completion handler (interrupt context): update the stats for
 * the aggregated packets carried by this URB, trigger recovery actions
 * on specific errors, return the buffer to the free pool, and restart
 * NAPI if more Tx data is pending with no URBs in flight.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors += entry->num_of_packet;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: defer a halt-clear to process ctx */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	skb_unlink(skb, &dev->txq);

	lan78xx_release_tx_buf(dev, skb);

	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
	 */
	if (skb_queue_empty(&dev->txq) &&
	    !skb_queue_empty(&dev->txq_pend))
		napi_schedule(&dev->napi);
}
3332 
3333 static void lan78xx_queue_skb(struct sk_buff_head *list,
3334 			      struct sk_buff *newsk, enum skb_state state)
3335 {
3336 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3337 
3338 	__skb_queue_tail(list, newsk);
3339 	entry->state = state;
3340 }
3341 
3342 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3343 {
3344 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3345 }
3346 
/* Return the number of bytes currently waiting on the Tx pending queue.
 * Read without txq_pend.lock; callers tolerate a momentarily stale value.
 */
static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
{
	return dev->tx_pend_data_len;
}
3351 
3352 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3353 				    struct sk_buff *skb,
3354 				    unsigned int *tx_pend_data_len)
3355 {
3356 	unsigned long flags;
3357 
3358 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3359 
3360 	__skb_queue_tail(&dev->txq_pend, skb);
3361 
3362 	dev->tx_pend_data_len += skb->len;
3363 	*tx_pend_data_len = dev->tx_pend_data_len;
3364 
3365 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3366 }
3367 
3368 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3369 					 struct sk_buff *skb,
3370 					 unsigned int *tx_pend_data_len)
3371 {
3372 	unsigned long flags;
3373 
3374 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3375 
3376 	__skb_queue_head(&dev->txq_pend, skb);
3377 
3378 	dev->tx_pend_data_len += skb->len;
3379 	*tx_pend_data_len = dev->tx_pend_data_len;
3380 
3381 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3382 }
3383 
3384 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3385 				    struct sk_buff **skb,
3386 				    unsigned int *tx_pend_data_len)
3387 {
3388 	unsigned long flags;
3389 
3390 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3391 
3392 	*skb = __skb_dequeue(&dev->txq_pend);
3393 	if (*skb)
3394 		dev->tx_pend_data_len -= (*skb)->len;
3395 	*tx_pend_data_len = dev->tx_pend_data_len;
3396 
3397 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3398 }
3399 
/* ndo_start_xmit: queue @skb for transmission.
 *
 * Frames are not submitted directly; they are added to txq_pend and the
 * NAPI handler packs them into bulk-out URBs (lan78xx_tx_bh). Always
 * returns NETDEV_TX_OK - flow control is done by stopping the stack
 * queue when pending data would fill every free URB.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* device asleep: let the delayed work trigger a resume first */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3435 
/* Driver/device binding: allocate the private data, set up deferred
 * work, netdev feature flags, the IRQ domain, device registers and the
 * MDIO bus.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is torn down again (pdata is freed, so dev->data[0]
 * must not be used by the caller after an error).
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = NULL;
	int ret;
	int i;

	/* private data hangs off dev->data[0] as an opaque pointer */
	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	if (DEFAULT_VLAN_RX_OFFLOAD)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (DEFAULT_VLAN_FILTER_ENABLE)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
	if (ret < 0) {
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);
		goto out1;
	}

	/* Init all registers */
	ret = lan78xx_reset(dev);
	if (ret) {
		netdev_warn(dev->net, "Registers INIT FAILED....");
		goto out2;
	}

	ret = lan78xx_mdio_init(dev);
	if (ret) {
		netdev_warn(dev->net, "MDIO INIT FAILED.....");
		goto out2;
	}

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	return ret;

out2:
	lan78xx_remove_irq_domain(dev);

out1:
	netdev_warn(dev->net, "Bind routine FAILED");
	/* works were INIT_WORK'd above; cancel is safe even if never queued */
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	kfree(pdata);
	return ret;
}
3517 
/* Undo lan78xx_bind(): remove the IRQ domain and MDIO bus, stop the
 * deferred works and free the private data.
 */
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	lan78xx_remove_irq_domain(dev);

	lan78xx_remove_mdio(dev);

	if (pdata) {
		cancel_work_sync(&pdata->set_multicast);
		cancel_work_sync(&pdata->set_vlan);
		netif_dbg(dev, ifdown, dev->net, "free pdata");
		kfree(pdata);
		pdata = NULL;
		/* clear the stale pointer so nothing reuses freed memory */
		dev->data[0] = 0;
	}
}
3535 
/* Fill in skb checksum state from the Rx command words.
 *
 * When offload applies, the HW-computed 16-bit checksum from RX_CMD_B is
 * handed to the stack as CHECKSUM_COMPLETE.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 * ICSM set in RX_CMD_A also indicates the HW checksum is unusable.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3553 
3554 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3555 				    struct sk_buff *skb,
3556 				    u32 rx_cmd_a, u32 rx_cmd_b)
3557 {
3558 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3559 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3560 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3561 				       (rx_cmd_b & 0xffff));
3562 }
3563 
3564 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3565 {
3566 	dev->net->stats.rx_packets++;
3567 	dev->net->stats.rx_bytes += skb->len;
3568 
3569 	skb->protocol = eth_type_trans(skb, dev->net);
3570 
3571 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3572 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3573 	memset(skb->cb, 0, sizeof(struct skb_data));
3574 
3575 	if (skb_defer_rx_timestamp(skb))
3576 		return;
3577 
3578 	napi_gro_receive(&dev->napi, skb);
3579 }
3580 
3581 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3582 		      int budget, int *work_done)
3583 {
3584 	if (skb->len < RX_SKB_MIN_LEN)
3585 		return 0;
3586 
3587 	/* Extract frames from the URB buffer and pass each one to
3588 	 * the stack in a new NAPI SKB.
3589 	 */
3590 	while (skb->len > 0) {
3591 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3592 		u16 rx_cmd_c;
3593 		unsigned char *packet;
3594 
3595 		rx_cmd_a = get_unaligned_le32(skb->data);
3596 		skb_pull(skb, sizeof(rx_cmd_a));
3597 
3598 		rx_cmd_b = get_unaligned_le32(skb->data);
3599 		skb_pull(skb, sizeof(rx_cmd_b));
3600 
3601 		rx_cmd_c = get_unaligned_le16(skb->data);
3602 		skb_pull(skb, sizeof(rx_cmd_c));
3603 
3604 		packet = skb->data;
3605 
3606 		/* get the packet length */
3607 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3608 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3609 
3610 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3611 			netif_dbg(dev, rx_err, dev->net,
3612 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3613 		} else {
3614 			u32 frame_len = size - ETH_FCS_LEN;
3615 			struct sk_buff *skb2;
3616 
3617 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3618 			if (!skb2)
3619 				return 0;
3620 
3621 			memcpy(skb2->data, packet, frame_len);
3622 
3623 			skb_put(skb2, frame_len);
3624 
3625 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3626 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3627 
3628 			/* Processing of the URB buffer must complete once
3629 			 * it has started. If the NAPI work budget is exhausted
3630 			 * while frames remain they are added to the overflow
3631 			 * queue for delivery in the next NAPI polling cycle.
3632 			 */
3633 			if (*work_done < budget) {
3634 				lan78xx_skb_return(dev, skb2);
3635 				++(*work_done);
3636 			} else {
3637 				skb_queue_tail(&dev->rxq_overflow, skb2);
3638 			}
3639 		}
3640 
3641 		skb_pull(skb, size);
3642 
3643 		/* skip padding bytes before the next frame starts */
3644 		if (skb->len)
3645 			skb_pull(skb, align_count);
3646 	}
3647 
3648 	return 1;
3649 }
3650 
3651 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3652 			      int budget, int *work_done)
3653 {
3654 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3655 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3656 		dev->net->stats.rx_errors++;
3657 	}
3658 }
3659 
/* Completion handler for a bulk-in (Rx) URB; runs in interrupt context.
 *
 * Classifies the URB status into an skb_state (rx_done for usable data,
 * rx_cleanup otherwise) and defers the buffer to the Rx done queue,
 * where lan78xx_bh() processes and resubmits it.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	/* expose the received bytes in the skb */
	skb_put(skb, urb->actual_length);
	state = rx_done;

	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		/* success, but runt transfers are counted and discarded */
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}
3719 
/* Submit one Rx URB using @skb as the transfer buffer.
 *
 * Submission is skipped (-ENOLINK) when the device is detached, not
 * running, halted or asleep. On any failure the buffer is returned to
 * the free pool. Returns 0 on successful submission.
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock serializes the state checks with unlink/teardown */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			/* treat as link-down; retry from NAPI */
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
3771 
3772 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3773 {
3774 	struct sk_buff *rx_buf;
3775 
3776 	/* Ensure the maximum number of Rx URBs is submitted
3777 	 */
3778 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3779 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3780 			break;
3781 	}
3782 }
3783 
/* Rewind a processed Rx buffer so its full capacity is available again,
 * then resubmit it to the device.
 */
static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
				    struct sk_buff *rx_buf)
{
	/* reset SKB data pointers */

	rx_buf->data = rx_buf->head;
	skb_reset_tail_pointer(rx_buf);
	rx_buf->len = 0;
	rx_buf->data_len = 0;

	rx_submit(dev, rx_buf, GFP_ATOMIC);
}
3796 
3797 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
3798 {
3799 	u32 tx_cmd_a;
3800 	u32 tx_cmd_b;
3801 
3802 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3803 
3804 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3805 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3806 
3807 	tx_cmd_b = 0;
3808 	if (skb_is_gso(skb)) {
3809 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3810 
3811 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3812 
3813 		tx_cmd_a |= TX_CMD_A_LSO_;
3814 	}
3815 
3816 	if (skb_vlan_tag_present(skb)) {
3817 		tx_cmd_a |= TX_CMD_A_IVTG_;
3818 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3819 	}
3820 
3821 	put_unaligned_le32(tx_cmd_a, buffer);
3822 	put_unaligned_le32(tx_cmd_b, buffer + 4);
3823 }
3824 
/* Pack as many pending Tx SKBs as fit into the URB buffer @tx_buf.
 *
 * Each frame is laid out as: alignment padding, TX_CMD_A/B command
 * words, then the frame data. The skb_data entry of @tx_buf records the
 * total payload length and packet count (GSO-aware) for statistics.
 *
 * Returns the skb_data entry of @tx_buf.
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* pad so each frame's command words start on TX_ALIGNMENT */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* doesn't fit: push back for the next URB */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			/* NOTE(review): only TX_CMD_LEN is rewound here, not
			 * align_bytes; harmless in practice as skb_copy_bits
			 * cannot fail for len <= skb->len - worth confirming.
			 */
			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		/* count each GSO segment as a packet for stats purposes */
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
3890 
/* NAPI Tx bottom half: wake the stack queue if space has freed up, then
 * pack pending SKBs into free URB buffers and submit them until pending
 * data, free URBs, or luck runs out.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		/* txq.lock held across autopm-get and submit so teardown
		 * cannot race with a half-submitted URB
		 */
		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			/* packets in this URB are lost; account and recycle */
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
3983 
/* NAPI bottom half: deliver overflow frames from the previous cycle,
 * process completed Rx URBs from the done queue (resubmitting each),
 * then replenish Rx URBs and kick the Tx path.
 *
 * Returns the number of frames delivered (NAPI work done).
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* errored URB: nothing to deliver, just resubmit */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4059 
/* NAPI poll callback: run the bottom half, complete the poll when under
 * budget, and reschedule if more Rx data arrived or Tx work remains.
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				/* pending Tx data with no URB in flight */
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	return result;
}
4103 
4104 static void lan78xx_delayedwork(struct work_struct *work)
4105 {
4106 	int status;
4107 	struct lan78xx_net *dev;
4108 
4109 	dev = container_of(work, struct lan78xx_net, wq.work);
4110 
4111 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4112 		return;
4113 
4114 	if (usb_autopm_get_interface(dev->intf) < 0)
4115 		return;
4116 
4117 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4118 		unlink_urbs(dev, &dev->txq);
4119 
4120 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4121 		if (status < 0 &&
4122 		    status != -EPIPE &&
4123 		    status != -ESHUTDOWN) {
4124 			if (netif_msg_tx_err(dev))
4125 				netdev_err(dev->net,
4126 					   "can't clear tx halt, status %d\n",
4127 					   status);
4128 		} else {
4129 			clear_bit(EVENT_TX_HALT, &dev->flags);
4130 			if (status != -ESHUTDOWN)
4131 				netif_wake_queue(dev->net);
4132 		}
4133 	}
4134 
4135 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4136 		unlink_urbs(dev, &dev->rxq);
4137 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4138 		if (status < 0 &&
4139 		    status != -EPIPE &&
4140 		    status != -ESHUTDOWN) {
4141 			if (netif_msg_rx_err(dev))
4142 				netdev_err(dev->net,
4143 					   "can't clear rx halt, status %d\n",
4144 					   status);
4145 		} else {
4146 			clear_bit(EVENT_RX_HALT, &dev->flags);
4147 			napi_schedule(&dev->napi);
4148 		}
4149 	}
4150 
4151 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4152 		int ret = 0;
4153 
4154 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4155 		if (lan78xx_link_reset(dev) < 0) {
4156 			netdev_info(dev->net, "link reset failed (%d)\n",
4157 				    ret);
4158 		}
4159 	}
4160 
4161 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4162 		lan78xx_update_stats(dev);
4163 
4164 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4165 
4166 		mod_timer(&dev->stat_monitor,
4167 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4168 
4169 		dev->delta = min((dev->delta * 2), 50);
4170 	}
4171 
4172 	usb_autopm_put_interface(dev->intf);
4173 }
4174 
/* Completion handler for the interrupt-in (status) URB.
 *
 * Processes the status payload on success and resubmits the URB unless
 * the interface is shutting down or the device is gone.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* clear the buffer so stale status bits can't be re-read */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4226 
/* USB disconnect callback: tear everything down in the reverse order of
 * probe. The EVENT_DEV_DISCONNECT flag stops the delayed work from
 * touching the (vanishing) hardware while we unwind.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);

	netif_napi_del(&dev->napi);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* fixed-link PHYs were registered by this driver; drop them too */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	/* discard Tx URBs parked while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4276 
/* ndo_tx_timeout: cancel all in-flight Tx URBs and let NAPI restart
 * transmission from the pending queue.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}
4284 
4285 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4286 						struct net_device *netdev,
4287 						netdev_features_t features)
4288 {
4289 	struct lan78xx_net *dev = netdev_priv(netdev);
4290 
4291 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4292 		features &= ~NETIF_F_GSO_MASK;
4293 
4294 	features = vlan_features_check(skb, features);
4295 	features = vxlan_features_check(skb, features);
4296 
4297 	return features;
4298 }
4299 
/* net_device callbacks; Tx is NAPI/URB driven (lan78xx_start_xmit) and
 * MDIO ioctls are handled by phylib via phy_do_ioctl_running.
 */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4315 
/* Stat poll timer: defer the statistics read (which needs USB register
 * access) to process context via the delayed work.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4322 
4323 static int lan78xx_probe(struct usb_interface *intf,
4324 			 const struct usb_device_id *id)
4325 {
4326 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4327 	struct lan78xx_net *dev;
4328 	struct net_device *netdev;
4329 	struct usb_device *udev;
4330 	int ret;
4331 	unsigned int maxp;
4332 	unsigned int period;
4333 	u8 *buf = NULL;
4334 
4335 	udev = interface_to_usbdev(intf);
4336 	udev = usb_get_dev(udev);
4337 
4338 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4339 	if (!netdev) {
4340 		dev_err(&intf->dev, "Error: OOM\n");
4341 		ret = -ENOMEM;
4342 		goto out1;
4343 	}
4344 
4345 	/* netdev_printk() needs this */
4346 	SET_NETDEV_DEV(netdev, &intf->dev);
4347 
4348 	dev = netdev_priv(netdev);
4349 	dev->udev = udev;
4350 	dev->intf = intf;
4351 	dev->net = netdev;
4352 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4353 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4354 
4355 	skb_queue_head_init(&dev->rxq);
4356 	skb_queue_head_init(&dev->txq);
4357 	skb_queue_head_init(&dev->rxq_done);
4358 	skb_queue_head_init(&dev->txq_pend);
4359 	skb_queue_head_init(&dev->rxq_overflow);
4360 	mutex_init(&dev->phy_mutex);
4361 	mutex_init(&dev->dev_mutex);
4362 
4363 	ret = lan78xx_urb_config_init(dev);
4364 	if (ret < 0)
4365 		goto out2;
4366 
4367 	ret = lan78xx_alloc_tx_resources(dev);
4368 	if (ret < 0)
4369 		goto out2;
4370 
4371 	ret = lan78xx_alloc_rx_resources(dev);
4372 	if (ret < 0)
4373 		goto out3;
4374 
4375 	/* MTU range: 68 - 9000 */
4376 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4377 
4378 	netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4379 
4380 	netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT);
4381 
4382 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4383 	init_usb_anchor(&dev->deferred);
4384 
4385 	netdev->netdev_ops = &lan78xx_netdev_ops;
4386 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4387 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4388 
4389 	dev->delta = 1;
4390 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4391 
4392 	mutex_init(&dev->stats.access_lock);
4393 
4394 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4395 		ret = -ENODEV;
4396 		goto out4;
4397 	}
4398 
4399 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4400 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4401 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4402 		ret = -ENODEV;
4403 		goto out4;
4404 	}
4405 
4406 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4407 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4408 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4409 		ret = -ENODEV;
4410 		goto out4;
4411 	}
4412 
4413 	ep_intr = &intf->cur_altsetting->endpoint[2];
4414 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4415 		ret = -ENODEV;
4416 		goto out4;
4417 	}
4418 
4419 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4420 					usb_endpoint_num(&ep_intr->desc));
4421 
4422 	ret = lan78xx_bind(dev, intf);
4423 	if (ret < 0)
4424 		goto out4;
4425 
4426 	period = ep_intr->desc.bInterval;
4427 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
4428 	buf = kmalloc(maxp, GFP_KERNEL);
4429 	if (!buf) {
4430 		ret = -ENOMEM;
4431 		goto out5;
4432 	}
4433 
4434 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4435 	if (!dev->urb_intr) {
4436 		ret = -ENOMEM;
4437 		goto out6;
4438 	} else {
4439 		usb_fill_int_urb(dev->urb_intr, dev->udev,
4440 				 dev->pipe_intr, buf, maxp,
4441 				 intr_complete, dev, period);
4442 		dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4443 	}
4444 
4445 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
4446 
4447 	/* Reject broken descriptors. */
4448 	if (dev->maxpacket == 0) {
4449 		ret = -ENODEV;
4450 		goto out6;
4451 	}
4452 
4453 	/* driver requires remote-wakeup capability during autosuspend. */
4454 	intf->needs_remote_wakeup = 1;
4455 
4456 	ret = lan78xx_phy_init(dev);
4457 	if (ret < 0)
4458 		goto out7;
4459 
4460 	ret = register_netdev(netdev);
4461 	if (ret != 0) {
4462 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4463 		goto out8;
4464 	}
4465 
4466 	usb_set_intfdata(intf, dev);
4467 
4468 	ret = device_set_wakeup_enable(&udev->dev, true);
4469 
4470 	 /* Default delay of 2sec has more overhead than advantage.
4471 	  * Set to 10sec as default.
4472 	  */
4473 	pm_runtime_set_autosuspend_delay(&udev->dev,
4474 					 DEFAULT_AUTOSUSPEND_DELAY);
4475 
4476 	return 0;
4477 
4478 out8:
4479 	phy_disconnect(netdev->phydev);
4480 out7:
4481 	usb_free_urb(dev->urb_intr);
4482 out6:
4483 	kfree(buf);
4484 out5:
4485 	lan78xx_unbind(dev, intf);
4486 out4:
4487 	netif_napi_del(&dev->napi);
4488 	lan78xx_free_rx_resources(dev);
4489 out3:
4490 	lan78xx_free_tx_resources(dev);
4491 out2:
4492 	free_netdev(netdev);
4493 out1:
4494 	usb_put_dev(udev);
4495 
4496 	return ret;
4497 }
4498 
/* Compute the 16-bit wakeup-frame filter CRC the way the LAN78xx
 * hardware does: polynomial 0x8005, seed 0xFFFF, data consumed LSB
 * first, with bit 0 forced to 1 whenever the polynomial is applied.
 * The result is programmed into the WUF_CFGx CRC field.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	u16 crc = 0xFFFF;
	int idx;

	for (idx = 0; idx < len; idx++) {
		u8 octet = buf[idx];
		unsigned int shift;

		for (shift = 0; shift < 8; shift++) {
			u16 feedback = (crc >> 15) ^ (u16)(octet & 1);

			crc <<= 1;
			if (feedback)
				crc = (crc ^ 0x8005) | 0x0001;
			octet >>= 1;
		}
	}

	return crc;
}
4523 
/* Prepare the device for USB autosuspend (selective suspend): stop the
 * TX and RX paths, clear stale wakeup control/source state, enable
 * good-frame (RFE) plus PHY/WOL wake events with suspend mode 3, clear
 * any latched wakeup status, then restart RX so an incoming frame can
 * wake the device.  Returns 0 on success or the negative error code of
 * the first failing register access.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	/* Quiesce the MAC before touching wakeup configuration. */
	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	/* Reset wakeup control and clear all recorded wake sources. */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	/* Wake on frames passing the receive filter.
	 * NOTE(review): STORE_WAKE_ presumably keeps the waking frame
	 * for delivery after resume — confirm against the datasheet.
	 */
	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* Enable PHY and WOL wake events; select suspend mode 3. */
	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Clear any latched wakeup status (WUPS) bits. */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* RX must run while suspended so a frame can trigger wakeup. */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4591 
/* Configure the device to wake the host per the Wake-on-LAN flags in
 * @wol (WAKE_PHY / WAKE_MAGIC / WAKE_BCAST / WAKE_MCAST / WAKE_UCAST /
 * WAKE_ARP): stop the MAC, program the wakeup-frame filters
 * (WUF_CFG/WUF_MASK) and WUCSR enables each flag requires, select the
 * matching PMT_CTL suspend mode, clear stale wakeup status, and
 * restart the RX path so wake frames can be received.  Returns 0 or a
 * negative register-access error code.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* Patterns used to build wakeup-frame CRC filters below:
	 * multicast destination prefixes and the ARP EtherType.
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	/* Quiesce the MAC before reprogramming wakeup state. */
	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* Reset wakeup control and clear recorded wake sources. */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* Disable all wakeup-frame filters before programming them. */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* byte mask 0b111: CRC covers the first three bytes
		 * (the 01:00:5E IPv4 multicast prefix above)
		 */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* byte mask 0b11: first two bytes (33:33 IPv6 prefix) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* byte mask 0x3000: bytes 12 and 13, the EtherType */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* RX must run while suspended so wake frames can be seen. */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4788 
/* USB suspend callback (runtime autosuspend and system sleep).
 * With the interface open: refuse autosuspend while TX work is
 * pending, quiesce RX/TX and outstanding URBs, then arm either
 * selective-suspend wakeup (runtime PM) or the stored Wake-on-LAN
 * configuration (system sleep).  With the interface closed: disable
 * all wakeup sources and enter suspend mode 3.
 * Returns 0, -EBUSY, or a register-access error code.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			/* system sleep: use the WOL settings stored in
			 * the driver's private data
			 */
			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		/* disable all wakeup sources */
		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* suspend mode 3, with no wake events enabled */
		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		/* clear any latched wakeup status (WUPS) bits */
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4898 
4899 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4900 {
4901 	bool pipe_halted = false;
4902 	struct urb *urb;
4903 
4904 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4905 		struct sk_buff *skb = urb->context;
4906 		int ret;
4907 
4908 		if (!netif_device_present(dev->net) ||
4909 		    !netif_carrier_ok(dev->net) ||
4910 		    pipe_halted) {
4911 			lan78xx_release_tx_buf(dev, skb);
4912 			continue;
4913 		}
4914 
4915 		ret = usb_submit_urb(urb, GFP_ATOMIC);
4916 
4917 		if (ret == 0) {
4918 			netif_trans_update(dev->net);
4919 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4920 		} else {
4921 			if (ret == -EPIPE) {
4922 				netif_stop_queue(dev->net);
4923 				pipe_halted = true;
4924 			} else if (ret == -ENODEV) {
4925 				netif_device_detach(dev->net);
4926 			}
4927 
4928 			lan78xx_release_tx_buf(dev, skb);
4929 		}
4930 	}
4931 
4932 	return pipe_halted;
4933 }
4934 
4935 static int lan78xx_resume(struct usb_interface *intf)
4936 {
4937 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4938 	bool dev_open;
4939 	int ret;
4940 
4941 	mutex_lock(&dev->dev_mutex);
4942 
4943 	netif_dbg(dev, ifup, dev->net, "resuming device");
4944 
4945 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4946 
4947 	if (dev_open) {
4948 		bool pipe_halted = false;
4949 
4950 		ret = lan78xx_flush_tx_fifo(dev);
4951 		if (ret < 0)
4952 			goto out;
4953 
4954 		if (dev->urb_intr) {
4955 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4956 
4957 			if (ret < 0) {
4958 				if (ret == -ENODEV)
4959 					netif_device_detach(dev->net);
4960 				netdev_warn(dev->net, "Failed to submit intr URB");
4961 			}
4962 		}
4963 
4964 		spin_lock_irq(&dev->txq.lock);
4965 
4966 		if (netif_device_present(dev->net)) {
4967 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
4968 
4969 			if (pipe_halted)
4970 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4971 		}
4972 
4973 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4974 
4975 		spin_unlock_irq(&dev->txq.lock);
4976 
4977 		if (!pipe_halted &&
4978 		    netif_device_present(dev->net) &&
4979 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
4980 			netif_start_queue(dev->net);
4981 
4982 		ret = lan78xx_start_tx_path(dev);
4983 		if (ret < 0)
4984 			goto out;
4985 
4986 		napi_schedule(&dev->napi);
4987 
4988 		if (!timer_pending(&dev->stat_monitor)) {
4989 			dev->delta = 1;
4990 			mod_timer(&dev->stat_monitor,
4991 				  jiffies + STAT_UPDATE_TIMER);
4992 		}
4993 
4994 	} else {
4995 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4996 	}
4997 
4998 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4999 	if (ret < 0)
5000 		goto out;
5001 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5002 	if (ret < 0)
5003 		goto out;
5004 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5005 	if (ret < 0)
5006 		goto out;
5007 
5008 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5009 					     WUCSR2_ARP_RCD_ |
5010 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5011 					     WUCSR2_IPV4_TCPSYN_RCD_);
5012 	if (ret < 0)
5013 		goto out;
5014 
5015 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5016 					    WUCSR_EEE_RX_WAKE_ |
5017 					    WUCSR_PFDA_FR_ |
5018 					    WUCSR_RFE_WAKE_FR_ |
5019 					    WUCSR_WUFR_ |
5020 					    WUCSR_MPR_ |
5021 					    WUCSR_BCST_FR_);
5022 	if (ret < 0)
5023 		goto out;
5024 
5025 	ret = 0;
5026 out:
5027 	mutex_unlock(&dev->dev_mutex);
5028 
5029 	return ret;
5030 }
5031 
5032 static int lan78xx_reset_resume(struct usb_interface *intf)
5033 {
5034 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5035 	int ret;
5036 
5037 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5038 
5039 	ret = lan78xx_reset(dev);
5040 	if (ret < 0)
5041 		return ret;
5042 
5043 	phy_start(dev->net->phydev);
5044 
5045 	ret = lan78xx_resume(intf);
5046 
5047 	return ret;
5048 }
5049 
/* USB vendor/product IDs this driver binds to. */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
5065 MODULE_DEVICE_TABLE(usb, products);
5066 
/* USB driver glue: probe/disconnect plus the power-management
 * callbacks defined above.  Autosuspend is supported; hub-initiated
 * LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
5078 
5079 module_usb_driver(lan78xx_driver);
5080 
5081 MODULE_AUTHOR(DRIVER_AUTHOR);
5082 MODULE_DESCRIPTION(DRIVER_DESC);
5083 MODULE_LICENSE("GPL");
5084