1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 
50 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52 					 (FLOW_THRESHOLD(off) << 8))
53 
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS			9216
56 #define FLOW_ON_HS			8704
57 
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS			4096
60 #define FLOW_OFF_HS			1024
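
/* Worked example (illustrative): the thresholds are programmed in
 * 512-byte units, so FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS)
 * packs ((9216 + 511) / 512) = 0x12 into bits 6:0 and
 * ((4096 + 511) / 512) = 0x08 into bits 14:8, giving 0x0812.
 */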
61 
62 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY		(0x0800)
64 #define MAX_SINGLE_PACKET_SIZE		(9000)
65 #define DEFAULT_TX_CSUM_ENABLE		(true)
66 #define DEFAULT_RX_CSUM_ENABLE		(true)
67 #define DEFAULT_TSO_CSUM_ENABLE		(true)
68 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
69 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
70 #define TX_ALIGNMENT			(4)
71 #define RXW_PADDING			2
72 
73 #define LAN78XX_USB_VENDOR_ID		(0x0424)
74 #define LAN7800_USB_PRODUCT_ID		(0x7800)
75 #define LAN7850_USB_PRODUCT_ID		(0x7850)
76 #define LAN7801_USB_PRODUCT_ID		(0x7801)
77 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
78 #define LAN78XX_OTP_MAGIC		(0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
81 
82 #define	MII_READ			1
83 #define	MII_WRITE			0
84 
85 #define EEPROM_INDICATOR		(0xA5)
86 #define EEPROM_MAC_OFFSET		(0x01)
87 #define MAX_EEPROM_SIZE			512
88 #define OTP_INDICATOR_1			(0xF3)
89 #define OTP_INDICATOR_2			(0xF7)
90 
91 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
92 					 WAKE_MCAST | WAKE_BCAST | \
93 					 WAKE_ARP | WAKE_MAGIC)
94 
95 #define TX_URB_NUM			10
96 #define TX_SS_URB_NUM			TX_URB_NUM
97 #define TX_HS_URB_NUM			TX_URB_NUM
98 #define TX_FS_URB_NUM			TX_URB_NUM
99 
/* A single URB buffer must be large enough to hold a complete jumbo
 * packet.
 */
102 #define TX_SS_URB_SIZE			(32 * 1024)
103 #define TX_HS_URB_SIZE			(16 * 1024)
104 #define TX_FS_URB_SIZE			(10 * 1024)
105 
106 #define RX_SS_URB_NUM			30
107 #define RX_HS_URB_NUM			10
108 #define RX_FS_URB_NUM			10
109 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
110 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
111 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
112 
113 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
114 #define SS_BULK_IN_DELAY		0x2000
115 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
116 #define HS_BULK_IN_DELAY		0x2000
117 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
118 #define FS_BULK_IN_DELAY		0x2000
119 
120 #define TX_CMD_LEN			8
121 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
122 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123 
124 #define RX_CMD_LEN			10
125 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
126 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
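
/* Worked examples (illustrative): with TX_SS_URB_SIZE (32 KiB),
 * LAN78XX_TSO_SIZE() allows 32768 - TX_SKB_MIN_LEN = 32746 bytes of
 * TSO payload, and for the default MTU of 1500 RX_MAX_FRAME_LEN()
 * evaluates to 1500 + 14 + 4 = 1518 bytes.
 */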
127 
128 /* USB related defines */
129 #define BULK_IN_PIPE			1
130 #define BULK_OUT_PIPE			2
131 
/* default autosuspend delay (ms) */
133 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
134 
/* statistics update interval (ms) */
136 #define STAT_UPDATE_TIMER		(1 * 1000)
137 
138 /* time to wait for MAC or FCT to stop (jiffies) */
139 #define HW_DISABLE_TIMEOUT		(HZ / 10)
140 
141 /* time to wait between polling MAC or FCT state (ms) */
142 #define HW_DISABLE_DELAY_MS		1
143 
/* interrupt endpoint (EP) status bit definitions */
145 #define MAX_INT_EP			(32)
146 #define INT_EP_INTEP			(31)
147 #define INT_EP_OTP_WR_DONE		(28)
148 #define INT_EP_EEE_TX_LPI_START		(26)
149 #define INT_EP_EEE_TX_LPI_STOP		(25)
150 #define INT_EP_EEE_RX_LPI		(24)
151 #define INT_EP_MAC_RESET_TIMEOUT	(23)
152 #define INT_EP_RDFO			(22)
153 #define INT_EP_TXE			(21)
154 #define INT_EP_USB_STATUS		(20)
155 #define INT_EP_TX_DIS			(19)
156 #define INT_EP_RX_DIS			(18)
157 #define INT_EP_PHY			(17)
158 #define INT_EP_DP			(16)
159 #define INT_EP_MAC_ERR			(15)
160 #define INT_EP_TDFU			(14)
161 #define INT_EP_TDFO			(13)
162 #define INT_EP_UTX			(12)
163 #define INT_EP_GPIO_11			(11)
164 #define INT_EP_GPIO_10			(10)
165 #define INT_EP_GPIO_9			(9)
166 #define INT_EP_GPIO_8			(8)
167 #define INT_EP_GPIO_7			(7)
168 #define INT_EP_GPIO_6			(6)
169 #define INT_EP_GPIO_5			(5)
170 #define INT_EP_GPIO_4			(4)
171 #define INT_EP_GPIO_3			(3)
172 #define INT_EP_GPIO_2			(2)
173 #define INT_EP_GPIO_1			(1)
174 #define INT_EP_GPIO_0			(0)
175 
176 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
177 	"RX FCS Errors",
178 	"RX Alignment Errors",
179 	"Rx Fragment Errors",
180 	"RX Jabber Errors",
181 	"RX Undersize Frame Errors",
182 	"RX Oversize Frame Errors",
183 	"RX Dropped Frames",
184 	"RX Unicast Byte Count",
185 	"RX Broadcast Byte Count",
186 	"RX Multicast Byte Count",
187 	"RX Unicast Frames",
188 	"RX Broadcast Frames",
189 	"RX Multicast Frames",
190 	"RX Pause Frames",
191 	"RX 64 Byte Frames",
192 	"RX 65 - 127 Byte Frames",
193 	"RX 128 - 255 Byte Frames",
194 	"RX 256 - 511 Bytes Frames",
195 	"RX 512 - 1023 Byte Frames",
196 	"RX 1024 - 1518 Byte Frames",
197 	"RX Greater 1518 Byte Frames",
198 	"EEE RX LPI Transitions",
199 	"EEE RX LPI Time",
200 	"TX FCS Errors",
201 	"TX Excess Deferral Errors",
202 	"TX Carrier Errors",
203 	"TX Bad Byte Count",
204 	"TX Single Collisions",
205 	"TX Multiple Collisions",
206 	"TX Excessive Collision",
207 	"TX Late Collisions",
208 	"TX Unicast Byte Count",
209 	"TX Broadcast Byte Count",
210 	"TX Multicast Byte Count",
211 	"TX Unicast Frames",
212 	"TX Broadcast Frames",
213 	"TX Multicast Frames",
214 	"TX Pause Frames",
215 	"TX 64 Byte Frames",
216 	"TX 65 - 127 Byte Frames",
217 	"TX 128 - 255 Byte Frames",
218 	"TX 256 - 511 Bytes Frames",
219 	"TX 512 - 1023 Byte Frames",
220 	"TX 1024 - 1518 Byte Frames",
221 	"TX Greater 1518 Byte Frames",
222 	"EEE TX LPI Transitions",
223 	"EEE TX LPI Time",
224 };
225 
226 struct lan78xx_statstage {
227 	u32 rx_fcs_errors;
228 	u32 rx_alignment_errors;
229 	u32 rx_fragment_errors;
230 	u32 rx_jabber_errors;
231 	u32 rx_undersize_frame_errors;
232 	u32 rx_oversize_frame_errors;
233 	u32 rx_dropped_frames;
234 	u32 rx_unicast_byte_count;
235 	u32 rx_broadcast_byte_count;
236 	u32 rx_multicast_byte_count;
237 	u32 rx_unicast_frames;
238 	u32 rx_broadcast_frames;
239 	u32 rx_multicast_frames;
240 	u32 rx_pause_frames;
241 	u32 rx_64_byte_frames;
242 	u32 rx_65_127_byte_frames;
243 	u32 rx_128_255_byte_frames;
244 	u32 rx_256_511_bytes_frames;
245 	u32 rx_512_1023_byte_frames;
246 	u32 rx_1024_1518_byte_frames;
247 	u32 rx_greater_1518_byte_frames;
248 	u32 eee_rx_lpi_transitions;
249 	u32 eee_rx_lpi_time;
250 	u32 tx_fcs_errors;
251 	u32 tx_excess_deferral_errors;
252 	u32 tx_carrier_errors;
253 	u32 tx_bad_byte_count;
254 	u32 tx_single_collisions;
255 	u32 tx_multiple_collisions;
256 	u32 tx_excessive_collision;
257 	u32 tx_late_collisions;
258 	u32 tx_unicast_byte_count;
259 	u32 tx_broadcast_byte_count;
260 	u32 tx_multicast_byte_count;
261 	u32 tx_unicast_frames;
262 	u32 tx_broadcast_frames;
263 	u32 tx_multicast_frames;
264 	u32 tx_pause_frames;
265 	u32 tx_64_byte_frames;
266 	u32 tx_65_127_byte_frames;
267 	u32 tx_128_255_byte_frames;
268 	u32 tx_256_511_bytes_frames;
269 	u32 tx_512_1023_byte_frames;
270 	u32 tx_1024_1518_byte_frames;
271 	u32 tx_greater_1518_byte_frames;
272 	u32 eee_tx_lpi_transitions;
273 	u32 eee_tx_lpi_time;
274 };
275 
276 struct lan78xx_statstage64 {
277 	u64 rx_fcs_errors;
278 	u64 rx_alignment_errors;
279 	u64 rx_fragment_errors;
280 	u64 rx_jabber_errors;
281 	u64 rx_undersize_frame_errors;
282 	u64 rx_oversize_frame_errors;
283 	u64 rx_dropped_frames;
284 	u64 rx_unicast_byte_count;
285 	u64 rx_broadcast_byte_count;
286 	u64 rx_multicast_byte_count;
287 	u64 rx_unicast_frames;
288 	u64 rx_broadcast_frames;
289 	u64 rx_multicast_frames;
290 	u64 rx_pause_frames;
291 	u64 rx_64_byte_frames;
292 	u64 rx_65_127_byte_frames;
293 	u64 rx_128_255_byte_frames;
294 	u64 rx_256_511_bytes_frames;
295 	u64 rx_512_1023_byte_frames;
296 	u64 rx_1024_1518_byte_frames;
297 	u64 rx_greater_1518_byte_frames;
298 	u64 eee_rx_lpi_transitions;
299 	u64 eee_rx_lpi_time;
300 	u64 tx_fcs_errors;
301 	u64 tx_excess_deferral_errors;
302 	u64 tx_carrier_errors;
303 	u64 tx_bad_byte_count;
304 	u64 tx_single_collisions;
305 	u64 tx_multiple_collisions;
306 	u64 tx_excessive_collision;
307 	u64 tx_late_collisions;
308 	u64 tx_unicast_byte_count;
309 	u64 tx_broadcast_byte_count;
310 	u64 tx_multicast_byte_count;
311 	u64 tx_unicast_frames;
312 	u64 tx_broadcast_frames;
313 	u64 tx_multicast_frames;
314 	u64 tx_pause_frames;
315 	u64 tx_64_byte_frames;
316 	u64 tx_65_127_byte_frames;
317 	u64 tx_128_255_byte_frames;
318 	u64 tx_256_511_bytes_frames;
319 	u64 tx_512_1023_byte_frames;
320 	u64 tx_1024_1518_byte_frames;
321 	u64 tx_greater_1518_byte_frames;
322 	u64 eee_tx_lpi_transitions;
323 	u64 eee_tx_lpi_time;
324 };
325 
326 static u32 lan78xx_regs[] = {
327 	ID_REV,
328 	INT_STS,
329 	HW_CFG,
330 	PMT_CTL,
331 	E2P_CMD,
332 	E2P_DATA,
333 	USB_STATUS,
334 	VLAN_TYPE,
335 	MAC_CR,
336 	MAC_RX,
337 	MAC_TX,
338 	FLOW,
339 	ERR_STS,
340 	MII_ACC,
341 	MII_DATA,
342 	EEE_TX_LPI_REQ_DLY,
343 	EEE_TW_TX_SYS,
344 	EEE_TX_LPI_REM_DLY,
345 	WUCSR
346 };
347 
348 #define PHY_REG_SIZE (32 * sizeof(u32))
349 
350 struct lan78xx_net;
351 
352 struct lan78xx_priv {
353 	struct lan78xx_net *dev;
354 	u32 rfe_ctl;
355 	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
356 	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
357 	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
358 	struct mutex dataport_mutex; /* for dataport access */
359 	spinlock_t rfe_ctl_lock; /* for rfe register access */
360 	struct work_struct set_multicast;
361 	struct work_struct set_vlan;
362 	u32 wol;
363 };
364 
365 enum skb_state {
366 	illegal = 0,
367 	tx_start,
368 	tx_done,
369 	rx_start,
370 	rx_done,
371 	rx_cleanup,
372 	unlink_start
373 };
374 
375 struct skb_data {		/* skb->cb is one of these */
376 	struct urb *urb;
377 	struct lan78xx_net *dev;
378 	enum skb_state state;
379 	size_t length;
380 	int num_of_packet;
381 };
382 
383 #define EVENT_TX_HALT			0
384 #define EVENT_RX_HALT			1
385 #define EVENT_RX_MEMORY			2
386 #define EVENT_STS_SPLIT			3
387 #define EVENT_LINK_RESET		4
388 #define EVENT_RX_PAUSED			5
389 #define EVENT_DEV_WAKING		6
390 #define EVENT_DEV_ASLEEP		7
391 #define EVENT_DEV_OPEN			8
392 #define EVENT_STAT_UPDATE		9
393 #define EVENT_DEV_DISCONNECT		10
394 
395 struct statstage {
396 	struct mutex			access_lock;	/* for stats access */
397 	struct lan78xx_statstage	saved;
398 	struct lan78xx_statstage	rollover_count;
399 	struct lan78xx_statstage	rollover_max;
400 	struct lan78xx_statstage64	curr_stat;
401 };
402 
403 struct irq_domain_data {
404 	struct irq_domain	*irqdomain;
405 	unsigned int		phyirq;
406 	struct irq_chip		*irqchip;
407 	irq_flow_handler_t	irq_handler;
408 	u32			irqenable;
409 	struct mutex		irq_lock;		/* for irq bus access */
410 };
411 
412 struct lan78xx_net {
413 	struct net_device	*net;
414 	struct usb_device	*udev;
415 	struct usb_interface	*intf;
416 	void			*driver_priv;
417 
418 	unsigned int		tx_pend_data_len;
419 	size_t			n_tx_urbs;
420 	size_t			n_rx_urbs;
421 	size_t			tx_urb_size;
422 	size_t			rx_urb_size;
423 
424 	struct sk_buff_head	rxq_free;
425 	struct sk_buff_head	rxq;
426 	struct sk_buff_head	rxq_done;
427 	struct sk_buff_head	rxq_overflow;
428 	struct sk_buff_head	txq_free;
429 	struct sk_buff_head	txq;
430 	struct sk_buff_head	txq_pend;
431 
432 	struct napi_struct	napi;
433 
434 	struct delayed_work	wq;
435 
436 	int			msg_enable;
437 
438 	struct urb		*urb_intr;
439 	struct usb_anchor	deferred;
440 
441 	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
442 	struct mutex		phy_mutex; /* for phy access */
443 	unsigned int		pipe_in, pipe_out, pipe_intr;
444 
445 	unsigned int		bulk_in_delay;
446 	unsigned int		burst_cap;
447 
448 	unsigned long		flags;
449 
450 	wait_queue_head_t	*wait;
451 	unsigned char		suspend_count;
452 
453 	unsigned int		maxpacket;
454 	struct timer_list	stat_monitor;
455 
456 	unsigned long		data[5];
457 
458 	int			link_on;
459 	u8			mdix_ctrl;
460 
461 	u32			chipid;
462 	u32			chiprev;
463 	struct mii_bus		*mdiobus;
464 	phy_interface_t		interface;
465 
466 	int			fc_autoneg;
467 	u8			fc_request_control;
468 
469 	int			delta;
470 	struct statstage	stats;
471 
472 	struct irq_domain_data	domain_data;
473 };
474 
475 /* use ethtool to change the level for any given device */
476 static int msg_level = -1;
477 module_param(msg_level, int, 0);
478 MODULE_PARM_DESC(msg_level, "Override default message level");
479 
480 static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
481 {
482 	if (skb_queue_empty(buf_pool))
483 		return NULL;
484 
485 	return skb_dequeue(buf_pool);
486 }
487 
488 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
489 				struct sk_buff *buf)
490 {
491 	buf->data = buf->head;
492 	skb_reset_tail_pointer(buf);
493 
494 	buf->len = 0;
495 	buf->data_len = 0;
496 
497 	skb_queue_tail(buf_pool, buf);
498 }
499 
500 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
501 {
502 	struct skb_data *entry;
503 	struct sk_buff *buf;
504 
505 	while (!skb_queue_empty(buf_pool)) {
506 		buf = skb_dequeue(buf_pool);
507 		if (buf) {
508 			entry = (struct skb_data *)buf->cb;
509 			usb_free_urb(entry->urb);
510 			dev_kfree_skb_any(buf);
511 		}
512 	}
513 }
514 
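/* Usage sketch (illustrative, not the literal call site): for a
 * SuperSpeed device the Rx pool is built roughly as
 *
 *	lan78xx_alloc_buf_pool(&dev->rxq_free, RX_SS_URB_NUM,
 *			       RX_SS_URB_SIZE, dev);
 *
 * i.e. 30 preallocated 32 KiB buffers, each carrying a dedicated URB
 * in its skb->cb; lan78xx_release_buf() recycles them by resetting
 * the skb geometry instead of reallocating.
 */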
515 static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
516 				  size_t n_urbs, size_t urb_size,
517 				  struct lan78xx_net *dev)
518 {
519 	struct skb_data *entry;
520 	struct sk_buff *buf;
521 	struct urb *urb;
522 	int i;
523 
524 	skb_queue_head_init(buf_pool);
525 
526 	for (i = 0; i < n_urbs; i++) {
527 		buf = alloc_skb(urb_size, GFP_ATOMIC);
528 		if (!buf)
529 			goto error;
530 
531 		if (skb_linearize(buf) != 0) {
532 			dev_kfree_skb_any(buf);
533 			goto error;
534 		}
535 
536 		urb = usb_alloc_urb(0, GFP_ATOMIC);
537 		if (!urb) {
538 			dev_kfree_skb_any(buf);
539 			goto error;
540 		}
541 
542 		entry = (struct skb_data *)buf->cb;
543 		entry->urb = urb;
544 		entry->dev = dev;
545 		entry->length = 0;
546 		entry->num_of_packet = 0;
547 
548 		skb_queue_tail(buf_pool, buf);
549 	}
550 
551 	return 0;
552 
553 error:
554 	lan78xx_free_buf_pool(buf_pool);
555 
556 	return -ENOMEM;
557 }
558 
559 static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
560 {
561 	return lan78xx_get_buf(&dev->rxq_free);
562 }
563 
564 static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
565 				   struct sk_buff *rx_buf)
566 {
567 	lan78xx_release_buf(&dev->rxq_free, rx_buf);
568 }
569 
570 static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
571 {
572 	lan78xx_free_buf_pool(&dev->rxq_free);
573 }
574 
575 static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
576 {
577 	return lan78xx_alloc_buf_pool(&dev->rxq_free,
578 				      dev->n_rx_urbs, dev->rx_urb_size, dev);
579 }
580 
581 static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
582 {
583 	return lan78xx_get_buf(&dev->txq_free);
584 }
585 
586 static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
587 				   struct sk_buff *tx_buf)
588 {
589 	lan78xx_release_buf(&dev->txq_free, tx_buf);
590 }
591 
592 static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
593 {
594 	lan78xx_free_buf_pool(&dev->txq_free);
595 }
596 
597 static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
598 {
599 	return lan78xx_alloc_buf_pool(&dev->txq_free,
600 				      dev->n_tx_urbs, dev->tx_urb_size, dev);
601 }
602 
603 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
604 {
605 	u32 *buf;
606 	int ret;
607 
608 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
609 		return -ENODEV;
610 
611 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
612 	if (!buf)
613 		return -ENOMEM;
614 
615 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
616 			      USB_VENDOR_REQUEST_READ_REGISTER,
617 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
618 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
619 	if (likely(ret >= 0)) {
620 		le32_to_cpus(buf);
621 		*data = *buf;
622 	} else if (net_ratelimit()) {
623 		netdev_warn(dev->net,
624 			    "Failed to read register index 0x%08x. ret = %pe",
625 			    index, ERR_PTR(ret));
626 	}
627 
628 	kfree(buf);
629 
630 	return ret;
631 }
632 
633 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
634 {
635 	u32 *buf;
636 	int ret;
637 
638 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
639 		return -ENODEV;
640 
641 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
642 	if (!buf)
643 		return -ENOMEM;
644 
645 	*buf = data;
646 	cpu_to_le32s(buf);
647 
648 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
649 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
650 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
651 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) && net_ratelimit()) {
654 		netdev_warn(dev->net,
655 			    "Failed to write register index 0x%08x. ret = %pe",
656 			    index, ERR_PTR(ret));
657 	}
658 
659 	kfree(buf);
660 
661 	return ret;
662 }
663 
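/* Usage sketch (illustrative): lan78xx_update_reg() is a
 * read-modify-write helper; for example
 *
 *	lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, 0);
 *
 * clears MAC_CR_EEE_EN_ while leaving the remaining MAC_CR bits
 * untouched.
 */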
664 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
665 			      u32 data)
666 {
667 	int ret;
668 	u32 buf;
669 
670 	ret = lan78xx_read_reg(dev, reg, &buf);
671 	if (ret < 0)
672 		return ret;
673 
674 	buf &= ~mask;
675 	buf |= (mask & data);
676 
677 	ret = lan78xx_write_reg(dev, reg, buf);
678 	if (ret < 0)
679 		return ret;
680 
681 	return 0;
682 }
683 
684 static int lan78xx_read_stats(struct lan78xx_net *dev,
685 			      struct lan78xx_statstage *data)
686 {
687 	int ret = 0;
688 	int i;
689 	struct lan78xx_statstage *stats;
690 	u32 *src;
691 	u32 *dst;
692 
693 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
694 	if (!stats)
695 		return -ENOMEM;
696 
697 	ret = usb_control_msg(dev->udev,
698 			      usb_rcvctrlpipe(dev->udev, 0),
699 			      USB_VENDOR_REQUEST_GET_STATS,
700 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
701 			      0,
702 			      0,
703 			      (void *)stats,
704 			      sizeof(*stats),
			      USB_CTRL_GET_TIMEOUT);
706 	if (likely(ret >= 0)) {
707 		src = (u32 *)stats;
708 		dst = (u32 *)data;
709 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
710 			le32_to_cpus(&src[i]);
711 			dst[i] = src[i];
712 		}
713 	} else {
		netdev_warn(dev->net,
			    "Failed to read stats: %pe", ERR_PTR(ret));
716 	}
717 
718 	kfree(stats);
719 
720 	return ret;
721 }
722 
723 #define check_counter_rollover(struct1, dev_stats, member)		\
724 	do {								\
725 		if ((struct1)->member < (dev_stats).saved.member)	\
726 			(dev_stats).rollover_count.member++;		\
727 	} while (0)
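
/* Example (illustrative): the hardware counters are 32 bit and wrap.
 * If the saved value was 0xFFFFFFF0 and the fresh reading is
 * 0x00000010, the new value is smaller than the saved one, so one
 * rollover is recorded for that member.
 */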
728 
729 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
730 					struct lan78xx_statstage *stats)
731 {
732 	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
733 	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
734 	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
735 	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
736 	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
737 	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
738 	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
739 	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
740 	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
741 	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
742 	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
743 	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
744 	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
745 	check_counter_rollover(stats, dev->stats, rx_pause_frames);
746 	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
747 	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
748 	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
749 	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
750 	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
751 	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
752 	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
753 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
754 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
755 	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
756 	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
757 	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
758 	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
759 	check_counter_rollover(stats, dev->stats, tx_single_collisions);
760 	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
761 	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
762 	check_counter_rollover(stats, dev->stats, tx_late_collisions);
763 	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
764 	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
765 	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
766 	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
767 	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
768 	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
769 	check_counter_rollover(stats, dev->stats, tx_pause_frames);
770 	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
771 	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
772 	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
773 	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
774 	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
775 	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
776 	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
777 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
778 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
779 
780 	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
781 }
782 
783 static void lan78xx_update_stats(struct lan78xx_net *dev)
784 {
785 	u32 *p, *count, *max;
786 	u64 *data;
787 	int i;
788 	struct lan78xx_statstage lan78xx_stats;
789 
790 	if (usb_autopm_get_interface(dev->intf) < 0)
791 		return;
792 
793 	p = (u32 *)&lan78xx_stats;
794 	count = (u32 *)&dev->stats.rollover_count;
795 	max = (u32 *)&dev->stats.rollover_max;
796 	data = (u64 *)&dev->stats.curr_stat;
797 
798 	mutex_lock(&dev->stats.access_lock);
799 
800 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
801 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
802 
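	/* Reconstruct 64-bit totals: each 32-bit reading p[i] is
	 * extended by the observed rollovers, e.g. (illustrative) with
	 * count[i] = 2 and max[i] = 0xFFFFFFFF the total is
	 * p[i] + 2 * 2^32.
	 */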
803 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
804 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
805 
806 	mutex_unlock(&dev->stats.access_lock);
807 
808 	usb_autopm_put_interface(dev->intf);
809 }
810 
811 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
812 {
813 	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
814 }
815 
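/* Usage sketch (illustrative): lan78xx_stop_hw(dev, MAC_RX,
 * MAC_RX_RXEN_, MAC_RX_RXD_) clears the enable bit and then polls the
 * register until the matching disabled bit is reported or
 * HW_DISABLE_TIMEOUT expires.
 */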
816 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
817 			   u32 hw_disabled)
818 {
819 	unsigned long timeout;
820 	bool stopped = true;
821 	int ret;
822 	u32 buf;
823 
824 	/* Stop the h/w block (if not already stopped) */
825 
826 	ret = lan78xx_read_reg(dev, reg, &buf);
827 	if (ret < 0)
828 		return ret;
829 
830 	if (buf & hw_enabled) {
831 		buf &= ~hw_enabled;
832 
833 		ret = lan78xx_write_reg(dev, reg, buf);
834 		if (ret < 0)
835 			return ret;
836 
837 		stopped = false;
838 		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do {
840 			ret = lan78xx_read_reg(dev, reg, &buf);
841 			if (ret < 0)
842 				return ret;
843 
844 			if (buf & hw_disabled)
845 				stopped = true;
846 			else
847 				msleep(HW_DISABLE_DELAY_MS);
848 		} while (!stopped && !time_after(jiffies, timeout));
849 	}
850 
851 	ret = stopped ? 0 : -ETIME;
852 
853 	return ret;
854 }
855 
856 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
857 {
858 	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
859 }
860 
861 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
862 {
863 	int ret;
864 
865 	netif_dbg(dev, drv, dev->net, "start tx path");
866 
867 	/* Start the MAC transmitter */
868 
869 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
870 	if (ret < 0)
871 		return ret;
872 
873 	/* Start the Tx FIFO */
874 
875 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
876 	if (ret < 0)
877 		return ret;
878 
879 	return 0;
880 }
881 
882 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
883 {
884 	int ret;
885 
886 	netif_dbg(dev, drv, dev->net, "stop tx path");
887 
888 	/* Stop the Tx FIFO */
889 
890 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
891 	if (ret < 0)
892 		return ret;
893 
894 	/* Stop the MAC transmitter */
895 
896 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
897 	if (ret < 0)
898 		return ret;
899 
900 	return 0;
901 }
902 
903 /* The caller must ensure the Tx path is stopped before calling
904  * lan78xx_flush_tx_fifo().
905  */
906 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
907 {
908 	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
909 }
910 
911 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
912 {
913 	int ret;
914 
915 	netif_dbg(dev, drv, dev->net, "start rx path");
916 
917 	/* Start the Rx FIFO */
918 
919 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
920 	if (ret < 0)
921 		return ret;
922 
	/* Start the MAC receiver */
924 
925 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
926 	if (ret < 0)
927 		return ret;
928 
929 	return 0;
930 }
931 
932 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
933 {
934 	int ret;
935 
936 	netif_dbg(dev, drv, dev->net, "stop rx path");
937 
938 	/* Stop the MAC receiver */
939 
940 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
941 	if (ret < 0)
942 		return ret;
943 
944 	/* Stop the Rx FIFO */
945 
946 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
947 	if (ret < 0)
948 		return ret;
949 
950 	return 0;
951 }
952 
953 /* The caller must ensure the Rx path is stopped before calling
954  * lan78xx_flush_rx_fifo().
955  */
956 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
957 {
958 	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
959 }
960 
/* Poll MII_ACC until the PHY access completes or a 1 s timeout
 * expires. Must be called with phy_mutex held.
 */
962 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
963 {
964 	unsigned long start_time = jiffies;
965 	u32 val;
966 	int ret;
967 
968 	do {
969 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
970 		if (unlikely(ret < 0))
971 			return -EIO;
972 
973 		if (!(val & MII_ACC_MII_BUSY_))
974 			return 0;
975 	} while (!time_after(jiffies, start_time + HZ));
976 
977 	return -EIO;
978 }
979 
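/* Example (illustrative): mii_access(phy_id, MII_BMSR, MII_READ)
 * composes a MII_ACC value that starts a read of the PHY status
 * register; the caller polls MII_ACC_MII_BUSY_ (see
 * lan78xx_phy_wait_not_busy() above) until the transaction completes.
 */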
980 static inline u32 mii_access(int id, int index, int read)
981 {
982 	u32 ret;
983 
984 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
985 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
986 	if (read)
987 		ret |= MII_ACC_MII_READ_;
988 	else
989 		ret |= MII_ACC_MII_WRITE_;
990 	ret |= MII_ACC_MII_BUSY_;
991 
992 	return ret;
993 }
994 
995 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
996 {
997 	unsigned long start_time = jiffies;
998 	u32 val;
999 	int ret;
1000 
1001 	do {
1002 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
1003 		if (ret < 0)
1004 			return ret;
1005 
1006 		if (!(val & E2P_CMD_EPC_BUSY_) ||
1007 		    (val & E2P_CMD_EPC_TIMEOUT_))
1008 			break;
1009 		usleep_range(40, 100);
1010 	} while (!time_after(jiffies, start_time + HZ));
1011 
1012 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM operation timed out");
1014 		return -ETIMEDOUT;
1015 	}
1016 
1017 	return 0;
1018 }
1019 
1020 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
1021 {
1022 	unsigned long start_time = jiffies;
1023 	u32 val;
1024 	int ret;
1025 
1026 	do {
1027 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
1028 		if (ret < 0)
1029 			return ret;
1030 
1031 		if (!(val & E2P_CMD_EPC_BUSY_))
1032 			return 0;
1033 
1034 		usleep_range(40, 100);
1035 	} while (!time_after(jiffies, start_time + HZ));
1036 
1037 	netdev_warn(dev->net, "EEPROM is busy");
1038 	return -ETIMEDOUT;
1039 }
1040 
1041 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
1042 				   u32 length, u8 *data)
1043 {
1044 	u32 val, saved;
1045 	int i, ret;
1046 
	/* On some chips the EEPROM pins are muxed with the LED function;
	 * disable the LEDs while accessing the EEPROM and restore them
	 * afterwards.
	 */
1050 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
1051 	if (ret < 0)
1052 		return ret;
1053 
1054 	saved = val;
1055 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
1056 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
1057 		ret = lan78xx_write_reg(dev, HW_CFG, val);
1058 		if (ret < 0)
1059 			return ret;
1060 	}
1061 
1062 	ret = lan78xx_eeprom_confirm_not_busy(dev);
1063 	if (ret == -ETIMEDOUT)
1064 		goto read_raw_eeprom_done;
1065 	/* If USB fails, there is nothing to do */
1066 	if (ret < 0)
1067 		return ret;
1068 
1069 	for (i = 0; i < length; i++) {
1070 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
1071 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1072 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
1073 		if (ret < 0)
1074 			return ret;
1075 
1076 		ret = lan78xx_wait_eeprom(dev);
1077 		/* Looks like not USB specific error, try to recover */
1078 		if (ret == -ETIMEDOUT)
1079 			goto read_raw_eeprom_done;
1080 		/* If USB fails, there is nothing to do */
1081 		if (ret < 0)
1082 			return ret;
1083 
1084 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
1085 		if (ret < 0)
1086 			return ret;
1087 
1088 		data[i] = val & 0xFF;
1089 		offset++;
1090 	}
1091 
1092 read_raw_eeprom_done:
1093 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
1094 		return lan78xx_write_reg(dev, HW_CFG, saved);
1095 
1096 	return 0;
1097 }
1098 
1099 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
1100 			       u32 length, u8 *data)
1101 {
1102 	int ret;
1103 	u8 sig;
1104 
1105 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
1106 	if (ret < 0)
1107 		return ret;
1108 
1109 	if (sig != EEPROM_INDICATOR)
1110 		return -ENODATA;
1111 
1112 	return lan78xx_read_raw_eeprom(dev, offset, length, data);
1113 }
1114 
1115 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
1116 				    u32 length, u8 *data)
1117 {
1118 	u32 val;
1119 	u32 saved;
1120 	int i, ret;
1121 
	/* On some chips the EEPROM pins are muxed with the LED function;
	 * disable the LEDs while accessing the EEPROM and restore them
	 * afterwards.
	 */
1125 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
1126 	if (ret < 0)
1127 		return ret;
1128 
1129 	saved = val;
1130 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
1131 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
1132 		ret = lan78xx_write_reg(dev, HW_CFG, val);
1133 		if (ret < 0)
1134 			return ret;
1135 	}
1136 
1137 	ret = lan78xx_eeprom_confirm_not_busy(dev);
1138 	/* Looks like not USB specific error, try to recover */
1139 	if (ret == -ETIMEDOUT)
1140 		goto write_raw_eeprom_done;
1141 	/* If USB fails, there is nothing to do */
1142 	if (ret < 0)
1143 		return ret;
1144 
1145 	/* Issue write/erase enable command */
1146 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
1147 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
1148 	if (ret < 0)
1149 		return ret;
1150 
1151 	ret = lan78xx_wait_eeprom(dev);
1152 	/* Looks like not USB specific error, try to recover */
1153 	if (ret == -ETIMEDOUT)
1154 		goto write_raw_eeprom_done;
1155 	/* If USB fails, there is nothing to do */
1156 	if (ret < 0)
1157 		return ret;
1158 
1159 	for (i = 0; i < length; i++) {
1160 		/* Fill data register */
1161 		val = data[i];
1162 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
1163 		if (ret < 0)
1164 			return ret;
1165 
1166 		/* Send "write" command */
1167 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
1168 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1169 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
1170 		if (ret < 0)
1171 			return ret;
1172 
1173 		ret = lan78xx_wait_eeprom(dev);
1174 		/* Looks like not USB specific error, try to recover */
1175 		if (ret == -ETIMEDOUT)
1176 			goto write_raw_eeprom_done;
1177 		/* If USB fails, there is nothing to do */
1178 		if (ret < 0)
1179 			return ret;
1180 
1181 		offset++;
1182 	}
1183 
1184 write_raw_eeprom_done:
1185 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
1186 		return lan78xx_write_reg(dev, HW_CFG, saved);
1187 
1188 	return 0;
1189 }
1190 
1191 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1192 				u32 length, u8 *data)
1193 {
1194 	unsigned long timeout;
1195 	int ret, i;
1196 	u32 buf;
1197 
1198 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1199 	if (ret < 0)
1200 		return ret;
1201 
1202 	if (buf & OTP_PWR_DN_PWRDN_N_) {
1203 		/* clear it and wait to be cleared */
1204 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1205 		if (ret < 0)
1206 			return ret;
1207 
1208 		timeout = jiffies + HZ;
1209 		do {
1210 			usleep_range(1, 10);
1211 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1212 			if (ret < 0)
1213 				return ret;
1214 
1215 			if (time_after(jiffies, timeout)) {
1216 				netdev_warn(dev->net,
1217 					    "timeout on OTP_PWR_DN");
1218 				return -ETIMEDOUT;
1219 			}
1220 		} while (buf & OTP_PWR_DN_PWRDN_N_);
1221 	}
1222 
1223 	for (i = 0; i < length; i++) {
1224 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
1225 					((offset + i) >> 8) & OTP_ADDR1_15_11);
1226 		if (ret < 0)
1227 			return ret;
1228 
1229 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
1230 					((offset + i) & OTP_ADDR2_10_3));
1231 		if (ret < 0)
1232 			return ret;
1233 
1234 		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
1235 		if (ret < 0)
1236 			return ret;
1237 
1238 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1239 		if (ret < 0)
1240 			return ret;
1241 
1242 		timeout = jiffies + HZ;
1243 		do {
1244 			udelay(1);
1245 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
1246 			if (ret < 0)
1247 				return ret;
1248 
1249 			if (time_after(jiffies, timeout)) {
1250 				netdev_warn(dev->net,
1251 					    "timeout on OTP_STATUS");
1252 				return -ETIMEDOUT;
1253 			}
1254 		} while (buf & OTP_STATUS_BUSY_);
1255 
1256 		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
1257 		if (ret < 0)
1258 			return ret;
1259 
1260 		data[i] = (u8)(buf & 0xFF);
1261 	}
1262 
1263 	return 0;
1264 }
1265 
1266 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
1267 				 u32 length, u8 *data)
1268 {
1269 	int i;
1270 	u32 buf;
1271 	unsigned long timeout;
1272 	int ret;
1273 
1274 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1275 	if (ret < 0)
1276 		return ret;
1277 
1278 	if (buf & OTP_PWR_DN_PWRDN_N_) {
1279 		/* clear it and wait to be cleared */
1280 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1281 		if (ret < 0)
1282 			return ret;
1283 
1284 		timeout = jiffies + HZ;
1285 		do {
1286 			udelay(1);
1287 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1288 			if (ret < 0)
1289 				return ret;
1290 
1291 			if (time_after(jiffies, timeout)) {
1292 				netdev_warn(dev->net,
1293 					    "timeout on OTP_PWR_DN completion");
1294 				return -ETIMEDOUT;
1295 			}
1296 		} while (buf & OTP_PWR_DN_PWRDN_N_);
1297 	}
1298 
1299 	/* set to BYTE program mode */
1300 	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
1301 	if (ret < 0)
1302 		return ret;
1303 
1304 	for (i = 0; i < length; i++) {
1305 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
1306 					((offset + i) >> 8) & OTP_ADDR1_15_11);
1307 		if (ret < 0)
1308 			return ret;
1309 
1310 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
1311 					((offset + i) & OTP_ADDR2_10_3));
1312 		if (ret < 0)
1313 			return ret;
1314 
1315 		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
1316 		if (ret < 0)
1317 			return ret;
1318 
1319 		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
1320 		if (ret < 0)
1321 			return ret;
1322 
1323 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1324 		if (ret < 0)
1325 			return ret;
1326 
1327 		timeout = jiffies + HZ;
1328 		do {
1329 			udelay(1);
1330 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
1331 			if (ret < 0)
1332 				return ret;
1333 
1334 			if (time_after(jiffies, timeout)) {
1335 				netdev_warn(dev->net,
1336 					    "Timeout on OTP_STATUS completion");
1337 				return -ETIMEDOUT;
1338 			}
1339 		} while (buf & OTP_STATUS_BUSY_);
1340 	}
1341 
1342 	return 0;
1343 }
1344 
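/* Note (illustrative): an OTP image signed with OTP_INDICATOR_2 (0xF7)
 * lives in the second 256-byte block, so lan78xx_read_otp() redirects
 * such reads by adding 0x100 to the requested offset.
 */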
1345 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1346 			    u32 length, u8 *data)
1347 {
1348 	u8 sig;
1349 	int ret;
1350 
	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
	if (ret < 0)
		return ret;

	if (sig == OTP_INDICATOR_2)
		offset += 0x100;
	else if (sig != OTP_INDICATOR_1)
		return -EINVAL;

	return lan78xx_read_raw_otp(dev, offset, length, data);
1363 }
1364 
1365 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1366 {
1367 	int i, ret;
1368 
1369 	for (i = 0; i < 100; i++) {
1370 		u32 dp_sel;
1371 
1372 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1373 		if (unlikely(ret < 0))
1374 			return ret;
1375 
1376 		if (dp_sel & DP_SEL_DPRDY_)
1377 			return 0;
1378 
1379 		usleep_range(40, 100);
1380 	}
1381 
1382 	netdev_warn(dev->net, "%s timed out", __func__);
1383 
1384 	return -ETIMEDOUT;
1385 }
1386 
1387 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1388 				  u32 addr, u32 length, u32 *buf)
1389 {
1390 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1391 	int i, ret;
1392 
1393 	ret = usb_autopm_get_interface(dev->intf);
1394 	if (ret < 0)
1395 		return ret;
1396 
1397 	mutex_lock(&pdata->dataport_mutex);
1398 
1399 	ret = lan78xx_dataport_wait_not_busy(dev);
1400 	if (ret < 0)
1401 		goto dataport_write;
1402 
1403 	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
1404 	if (ret < 0)
1405 		goto dataport_write;
1406 
1407 	for (i = 0; i < length; i++) {
1408 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1409 		if (ret < 0)
1410 			goto dataport_write;
1411 
1412 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1413 		if (ret < 0)
1414 			goto dataport_write;
1415 
1416 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1417 		if (ret < 0)
1418 			goto dataport_write;
1419 
1420 		ret = lan78xx_dataport_wait_not_busy(dev);
1421 		if (ret < 0)
1422 			goto dataport_write;
1423 	}
1424 
1425 dataport_write:
1426 	if (ret < 0)
1427 		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));
1428 
1429 	mutex_unlock(&pdata->dataport_mutex);
1430 	usb_autopm_put_interface(dev->intf);
1431 
1432 	return ret;
1433 }
1434 
1435 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1436 				    int index, u8 addr[ETH_ALEN])
1437 {
1438 	u32 temp;
1439 
	if (pdata && index > 0 && index < NUM_OF_MAF) {
1441 		temp = addr[3];
1442 		temp = addr[2] | (temp << 8);
1443 		temp = addr[1] | (temp << 8);
1444 		temp = addr[0] | (temp << 8);
1445 		pdata->pfilter_table[index][1] = temp;
1446 		temp = addr[5];
1447 		temp = addr[4] | (temp << 8);
1448 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1449 		pdata->pfilter_table[index][0] = temp;
1450 	}
1451 }
1452 
1453 /* returns hash bit number for given MAC address */
1454 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1455 {
1456 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1457 }
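
/* Example (illustrative): the hash is bits 31:23 of the CRC-32 of the
 * address, a 9-bit index (0-511) into the 512-bit multicast hash
 * table; lan78xx_set_multicast() then uses bitnum / 32 to select the
 * table word and bitnum % 32 to select the bit within it.
 */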
1458 
1459 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1460 {
1461 	struct lan78xx_priv *pdata =
1462 			container_of(param, struct lan78xx_priv, set_multicast);
1463 	struct lan78xx_net *dev = pdata->dev;
1464 	int i, ret;
1465 
1466 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1467 		  pdata->rfe_ctl);
1468 
1469 	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
1470 				     DP_SEL_VHF_VLAN_LEN,
1471 				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1472 	if (ret < 0)
1473 		goto multicast_write_done;
1474 
1475 	for (i = 1; i < NUM_OF_MAF; i++) {
1476 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1477 		if (ret < 0)
1478 			goto multicast_write_done;
1479 
1480 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1481 					pdata->pfilter_table[i][1]);
1482 		if (ret < 0)
1483 			goto multicast_write_done;
1484 
1485 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1486 					pdata->pfilter_table[i][0]);
1487 		if (ret < 0)
1488 			goto multicast_write_done;
1489 	}
1490 
1491 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1492 
1493 multicast_write_done:
1494 	if (ret < 0)
1495 		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
1497 }
1498 
1499 static void lan78xx_set_multicast(struct net_device *netdev)
1500 {
1501 	struct lan78xx_net *dev = netdev_priv(netdev);
1502 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1503 	unsigned long flags;
1504 	int i;
1505 
1506 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1507 
1508 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1509 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1510 
1511 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1512 		pdata->mchash_table[i] = 0;
1513 
1514 	/* pfilter_table[0] has own HW address */
1515 	for (i = 1; i < NUM_OF_MAF; i++) {
1516 		pdata->pfilter_table[i][0] = 0;
1517 		pdata->pfilter_table[i][1] = 0;
1518 	}
1519 
1520 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1521 
1522 	if (dev->net->flags & IFF_PROMISC) {
1523 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1524 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1525 	} else {
1526 		if (dev->net->flags & IFF_ALLMULTI) {
1527 			netif_dbg(dev, drv, dev->net,
1528 				  "receive all multicast enabled");
1529 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1530 		}
1531 	}
1532 
1533 	if (netdev_mc_count(dev->net)) {
1534 		struct netdev_hw_addr *ha;
1535 		int i;
1536 
1537 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1538 
1539 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1540 
1541 		i = 1;
1542 		netdev_for_each_mc_addr(ha, netdev) {
			/* put the first 32 addresses into perfect-filter slots 1-32 */
1544 			if (i < 33) {
1545 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1546 			} else {
1547 				u32 bitnum = lan78xx_hash(ha->addr);
1548 
1549 				pdata->mchash_table[bitnum / 32] |=
1550 							(1 << (bitnum % 32));
1551 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1552 			}
1553 			i++;
1554 		}
1555 	}
1556 
1557 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1558 
1559 	/* defer register writes to a sleepable context */
1560 	schedule_work(&pdata->set_multicast);
1561 }
1562 
1563 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1564 				      u16 lcladv, u16 rmtadv)
1565 {
	u32 flow = 0, fct_flow = 0;
	u8 cap;
	int ret;
1568 
1569 	if (dev->fc_autoneg)
1570 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1571 	else
1572 		cap = dev->fc_request_control;
1573 
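	/* Note (assumption): the low 16 bits of FLOW hold the pause
	 * time sent in PAUSE frames, so 0xFFFF below requests the
	 * maximum pause quanta alongside the TX flow-control enable.
	 */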
1574 	if (cap & FLOW_CTRL_TX)
1575 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1576 
1577 	if (cap & FLOW_CTRL_RX)
1578 		flow |= FLOW_CR_RX_FCEN_;
1579 
1580 	if (dev->udev->speed == USB_SPEED_SUPER)
1581 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1582 	else if (dev->udev->speed == USB_SPEED_HIGH)
1583 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1584 
1585 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1586 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1587 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1588 
	/* threshold values should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
	if (ret < 0)
		return ret;

	return lan78xx_write_reg(dev, FLOW, flow);
1595 }
1596 
1597 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1598 
1599 static int lan78xx_mac_reset(struct lan78xx_net *dev)
1600 {
1601 	unsigned long start_time = jiffies;
1602 	u32 val;
1603 	int ret;
1604 
1605 	mutex_lock(&dev->phy_mutex);
1606 
1607 	/* Resetting the device while there is activity on the MDIO
1608 	 * bus can result in the MAC interface locking up and not
1609 	 * completing register access transactions.
1610 	 */
1611 	ret = lan78xx_phy_wait_not_busy(dev);
1612 	if (ret < 0)
1613 		goto done;
1614 
1615 	ret = lan78xx_read_reg(dev, MAC_CR, &val);
1616 	if (ret < 0)
1617 		goto done;
1618 
1619 	val |= MAC_CR_RST_;
1620 	ret = lan78xx_write_reg(dev, MAC_CR, val);
1621 	if (ret < 0)
1622 		goto done;
1623 
1624 	/* Wait for the reset to complete before allowing any further
1625 	 * MAC register accesses otherwise the MAC may lock up.
1626 	 */
1627 	do {
1628 		ret = lan78xx_read_reg(dev, MAC_CR, &val);
1629 		if (ret < 0)
1630 			goto done;
1631 
1632 		if (!(val & MAC_CR_RST_)) {
1633 			ret = 0;
1634 			goto done;
1635 		}
1636 	} while (!time_after(jiffies, start_time + HZ));
1637 
1638 	ret = -ETIMEDOUT;
1639 done:
1640 	mutex_unlock(&dev->phy_mutex);
1641 
1642 	return ret;
1643 }
1644 
1645 static int lan78xx_link_reset(struct lan78xx_net *dev)
1646 {
1647 	struct phy_device *phydev = dev->net->phydev;
1648 	struct ethtool_link_ksettings ecmd;
1649 	int ladv, radv, ret, link;
1650 	u32 buf;
1651 
1652 	/* clear LAN78xx interrupt status */
1653 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1654 	if (unlikely(ret < 0))
1655 		return ret;
1656 
1657 	mutex_lock(&phydev->lock);
1658 	phy_read_status(phydev);
1659 	link = phydev->link;
1660 	mutex_unlock(&phydev->lock);
1661 
1662 	if (!link && dev->link_on) {
1663 		dev->link_on = false;
1664 
1665 		/* reset MAC */
1666 		ret = lan78xx_mac_reset(dev);
1667 		if (ret < 0)
1668 			return ret;
1669 
1670 		del_timer(&dev->stat_monitor);
1671 	} else if (link && !dev->link_on) {
1672 		dev->link_on = true;
1673 
1674 		phy_ethtool_ksettings_get(phydev, &ecmd);
1675 
1676 		if (dev->udev->speed == USB_SPEED_SUPER) {
1677 			if (ecmd.base.speed == 1000) {
1678 				/* disable U2 */
1679 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1680 				if (ret < 0)
1681 					return ret;
1682 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1683 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1684 				if (ret < 0)
1685 					return ret;
1686 				/* enable U1 */
1687 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1688 				if (ret < 0)
1689 					return ret;
1690 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1691 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1692 				if (ret < 0)
1693 					return ret;
1694 			} else {
1695 				/* enable U1 & U2 */
1696 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1697 				if (ret < 0)
1698 					return ret;
1699 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1700 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1701 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1702 				if (ret < 0)
1703 					return ret;
1704 			}
1705 		}
1706 
1707 		ladv = phy_read(phydev, MII_ADVERTISE);
1708 		if (ladv < 0)
1709 			return ladv;
1710 
1711 		radv = phy_read(phydev, MII_LPA);
1712 		if (radv < 0)
1713 			return radv;
1714 
1715 		netif_dbg(dev, link, dev->net,
1716 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1717 			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1718 
1719 		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1720 						 radv);
1721 		if (ret < 0)
1722 			return ret;
1723 
1724 		if (!timer_pending(&dev->stat_monitor)) {
1725 			dev->delta = 1;
1726 			mod_timer(&dev->stat_monitor,
1727 				  jiffies + STAT_UPDATE_TIMER);
1728 		}
1729 
1730 		lan78xx_rx_urb_submit_all(dev);
1731 
1732 		local_bh_disable();
1733 		napi_schedule(&dev->napi);
1734 		local_bh_enable();
1735 	}
1736 
1737 	return 0;
1738 }
1739 
/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
1745 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1746 {
1747 	set_bit(work, &dev->flags);
1748 	if (!schedule_delayed_work(&dev->wq, 0))
1749 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1750 }
1751 
1752 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1753 {
1754 	u32 intdata;
1755 
1756 	if (urb->actual_length != 4) {
1757 		netdev_warn(dev->net,
1758 			    "unexpected urb length %d", urb->actual_length);
1759 		return;
1760 	}
1761 
1762 	intdata = get_unaligned_le32(urb->transfer_buffer);
1763 
1764 	if (intdata & INT_ENP_PHY_INT) {
1765 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1766 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1767 
1768 		if (dev->domain_data.phyirq > 0)
1769 			generic_handle_irq_safe(dev->domain_data.phyirq);
1770 	} else {
1771 		netdev_warn(dev->net,
1772 			    "unexpected interrupt: 0x%08x\n", intdata);
1773 	}
1774 }
1775 
1776 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1777 {
1778 	return MAX_EEPROM_SIZE;
1779 }
1780 
1781 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1782 				      struct ethtool_eeprom *ee, u8 *data)
1783 {
1784 	struct lan78xx_net *dev = netdev_priv(netdev);
1785 	int ret;
1786 
1787 	ret = usb_autopm_get_interface(dev->intf);
1788 	if (ret)
1789 		return ret;
1790 
1791 	ee->magic = LAN78XX_EEPROM_MAGIC;
1792 
1793 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1794 
1795 	usb_autopm_put_interface(dev->intf);
1796 
1797 	return ret;
1798 }
1799 
1800 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1801 				      struct ethtool_eeprom *ee, u8 *data)
1802 {
1803 	struct lan78xx_net *dev = netdev_priv(netdev);
1804 	int ret;
1805 
1806 	ret = usb_autopm_get_interface(dev->intf);
1807 	if (ret)
1808 		return ret;
1809 
1810 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1811 	 * to load data from EEPROM
1812 	 */
1813 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1814 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1815 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1816 		 (ee->offset == 0) &&
1817 		 (ee->len == 512) &&
1818 		 (data[0] == OTP_INDICATOR_1))
1819 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1820 
1821 	usb_autopm_put_interface(dev->intf);
1822 
1823 	return ret;
1824 }
1825 
1826 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1827 				u8 *data)
1828 {
1829 	if (stringset == ETH_SS_STATS)
1830 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1831 }
1832 
1833 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1834 {
1835 	if (sset == ETH_SS_STATS)
1836 		return ARRAY_SIZE(lan78xx_gstrings);
1837 	else
1838 		return -EOPNOTSUPP;
1839 }
1840 
1841 static void lan78xx_get_stats(struct net_device *netdev,
1842 			      struct ethtool_stats *stats, u64 *data)
1843 {
1844 	struct lan78xx_net *dev = netdev_priv(netdev);
1845 
1846 	lan78xx_update_stats(dev);
1847 
1848 	mutex_lock(&dev->stats.access_lock);
1849 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1850 	mutex_unlock(&dev->stats.access_lock);
1851 }
1852 
1853 static void lan78xx_get_wol(struct net_device *netdev,
1854 			    struct ethtool_wolinfo *wol)
1855 {
1856 	struct lan78xx_net *dev = netdev_priv(netdev);
1857 	int ret;
1858 	u32 buf;
1859 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1860 
1861 	if (usb_autopm_get_interface(dev->intf) < 0)
1862 		return;
1863 
1864 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1865 	if (unlikely(ret < 0)) {
1866 		wol->supported = 0;
1867 		wol->wolopts = 0;
1868 	} else {
1869 		if (buf & USB_CFG_RMT_WKP_) {
1870 			wol->supported = WAKE_ALL;
1871 			wol->wolopts = pdata->wol;
1872 		} else {
1873 			wol->supported = 0;
1874 			wol->wolopts = 0;
1875 		}
1876 	}
1877 
1878 	usb_autopm_put_interface(dev->intf);
1879 }
1880 
1881 static int lan78xx_set_wol(struct net_device *netdev,
1882 			   struct ethtool_wolinfo *wol)
1883 {
1884 	struct lan78xx_net *dev = netdev_priv(netdev);
1885 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1886 	int ret;
1887 
1888 	if (wol->wolopts & ~WAKE_ALL)
1889 		return -EINVAL;
1890 
1891 	ret = usb_autopm_get_interface(dev->intf);
1892 	if (ret < 0)
1893 		return ret;
1894 
1895 	pdata->wol = wol->wolopts;
1896 
1897 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1898 
1899 	phy_ethtool_set_wol(netdev->phydev, wol);
1900 
1901 	usb_autopm_put_interface(dev->intf);
1902 
1903 	return ret;
1904 }
1905 
1906 static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
1907 {
1908 	struct lan78xx_net *dev = netdev_priv(net);
1909 	struct phy_device *phydev = net->phydev;
1910 	int ret;
1911 	u32 buf;
1912 
1913 	ret = usb_autopm_get_interface(dev->intf);
1914 	if (ret < 0)
1915 		return ret;
1916 
1917 	ret = phy_ethtool_get_eee(phydev, edata);
1918 	if (ret < 0)
1919 		goto exit;
1920 
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		goto exit;

	if (buf & MAC_CR_EEE_EN_) {
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same usec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		if (ret < 0)
			goto exit;

		edata->tx_lpi_timer = buf;
	} else {
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
1931 exit:
1932 	usb_autopm_put_interface(dev->intf);
1933 
1934 	return ret;
1935 }
1936 
1937 static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
1938 {
1939 	struct lan78xx_net *dev = netdev_priv(net);
1940 	int ret;
1941 	u32 buf;
1942 
1943 	ret = usb_autopm_get_interface(dev->intf);
1944 	if (ret < 0)
1945 		return ret;
1946 
1947 	ret = phy_ethtool_set_eee(net->phydev, edata);
1948 	if (ret < 0)
1949 		goto out;
1950 
1951 	buf = (u32)edata->tx_lpi_timer;
1952 	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1953 out:
1954 	usb_autopm_put_interface(dev->intf);
1955 
1956 	return ret;
1957 }
1958 
1959 static u32 lan78xx_get_link(struct net_device *net)
1960 {
1961 	u32 link;
1962 
1963 	mutex_lock(&net->phydev->lock);
1964 	phy_read_status(net->phydev);
1965 	link = net->phydev->link;
1966 	mutex_unlock(&net->phydev->lock);
1967 
1968 	return link;
1969 }
1970 
1971 static void lan78xx_get_drvinfo(struct net_device *net,
1972 				struct ethtool_drvinfo *info)
1973 {
1974 	struct lan78xx_net *dev = netdev_priv(net);
1975 
1976 	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1977 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1978 }
1979 
1980 static u32 lan78xx_get_msglevel(struct net_device *net)
1981 {
1982 	struct lan78xx_net *dev = netdev_priv(net);
1983 
1984 	return dev->msg_enable;
1985 }
1986 
1987 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1988 {
1989 	struct lan78xx_net *dev = netdev_priv(net);
1990 
1991 	dev->msg_enable = level;
1992 }
1993 
1994 static int lan78xx_get_link_ksettings(struct net_device *net,
1995 				      struct ethtool_link_ksettings *cmd)
1996 {
1997 	struct lan78xx_net *dev = netdev_priv(net);
1998 	struct phy_device *phydev = net->phydev;
1999 	int ret;
2000 
2001 	ret = usb_autopm_get_interface(dev->intf);
2002 	if (ret < 0)
2003 		return ret;
2004 
2005 	phy_ethtool_ksettings_get(phydev, cmd);
2006 
2007 	usb_autopm_put_interface(dev->intf);
2008 
2009 	return ret;
2010 }
2011 
2012 static int lan78xx_set_link_ksettings(struct net_device *net,
2013 				      const struct ethtool_link_ksettings *cmd)
2014 {
2015 	struct lan78xx_net *dev = netdev_priv(net);
2016 	struct phy_device *phydev = net->phydev;
2017 	int ret = 0;
2018 	int temp;
2019 
2020 	ret = usb_autopm_get_interface(dev->intf);
2021 	if (ret < 0)
2022 		return ret;
2023 
2024 	/* change speed & duplex */
2025 	ret = phy_ethtool_ksettings_set(phydev, cmd);
2026 
2027 	if (!cmd->base.autoneg) {
		/* briefly force the link down (loopback drops the link) so
		 * the new forced speed/duplex settings take effect
		 */
2029 		temp = phy_read(phydev, MII_BMCR);
2030 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
2031 		mdelay(1);
2032 		phy_write(phydev, MII_BMCR, temp);
2033 	}
2034 
2035 	usb_autopm_put_interface(dev->intf);
2036 
2037 	return ret;
2038 }
2039 
2040 static void lan78xx_get_pause(struct net_device *net,
2041 			      struct ethtool_pauseparam *pause)
2042 {
2043 	struct lan78xx_net *dev = netdev_priv(net);
2044 	struct phy_device *phydev = net->phydev;
2045 	struct ethtool_link_ksettings ecmd;
2046 
2047 	phy_ethtool_ksettings_get(phydev, &ecmd);
2048 
2049 	pause->autoneg = dev->fc_autoneg;
2050 
2051 	if (dev->fc_request_control & FLOW_CTRL_TX)
2052 		pause->tx_pause = 1;
2053 
2054 	if (dev->fc_request_control & FLOW_CTRL_RX)
2055 		pause->rx_pause = 1;
2056 }
2057 
2058 static int lan78xx_set_pause(struct net_device *net,
2059 			     struct ethtool_pauseparam *pause)
2060 {
2061 	struct lan78xx_net *dev = netdev_priv(net);
2062 	struct phy_device *phydev = net->phydev;
2063 	struct ethtool_link_ksettings ecmd;
2064 	int ret;
2065 
2066 	phy_ethtool_ksettings_get(phydev, &ecmd);
2067 
2068 	if (pause->autoneg && !ecmd.base.autoneg) {
2069 		ret = -EINVAL;
2070 		goto exit;
2071 	}
2072 
2073 	dev->fc_request_control = 0;
2074 	if (pause->rx_pause)
2075 		dev->fc_request_control |= FLOW_CTRL_RX;
2076 
2077 	if (pause->tx_pause)
2078 		dev->fc_request_control |= FLOW_CTRL_TX;
2079 
2080 	if (ecmd.base.autoneg) {
2081 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2082 		u32 mii_adv;
2083 
2084 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2085 				   ecmd.link_modes.advertising);
2086 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2087 				   ecmd.link_modes.advertising);
2088 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2089 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
2090 		linkmode_or(ecmd.link_modes.advertising, fc,
2091 			    ecmd.link_modes.advertising);
2092 
2093 		phy_ethtool_ksettings_set(phydev, &ecmd);
2094 	}
2095 
2096 	dev->fc_autoneg = pause->autoneg;
2097 
2098 	ret = 0;
2099 exit:
2100 	return ret;
2101 }
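
/* Sketch of the flow-control mapping used above, per the mii.h helper
 * semantics: FLOW_CTRL_RX|FLOW_CTRL_TX advertises Pause only, FLOW_CTRL_RX
 * alone advertises Pause|Asym_Pause, and FLOW_CTRL_TX alone advertises
 * Asym_Pause only.
 */
static void example_fc_to_linkmodes(u8 fc_request)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	u32 mii_adv = (u32)mii_advertise_flowctrl(fc_request);

	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	/* fc now carries the Pause/Asym_Pause link-mode bits per the table */
}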
2102 
2103 static int lan78xx_get_regs_len(struct net_device *netdev)
2104 {
2105 	if (!netdev->phydev)
		return sizeof(lan78xx_regs);

	return sizeof(lan78xx_regs) + PHY_REG_SIZE;
2109 }
2110 
2111 static void
2112 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
2113 		 void *buf)
2114 {
2115 	u32 *data = buf;
2116 	int i, j;
2117 	struct lan78xx_net *dev = netdev_priv(netdev);
2118 
2119 	/* Read Device/MAC registers */
2120 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
2121 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
2122 
2123 	if (!netdev->phydev)
2124 		return;
2125 
2126 	/* Read PHY registers */
2127 	for (j = 0; j < 32; i++, j++)
2128 		data[i] = phy_read(netdev->phydev, j);
2129 }
2130 
2131 static const struct ethtool_ops lan78xx_ethtool_ops = {
2132 	.get_link	= lan78xx_get_link,
2133 	.nway_reset	= phy_ethtool_nway_reset,
2134 	.get_drvinfo	= lan78xx_get_drvinfo,
2135 	.get_msglevel	= lan78xx_get_msglevel,
2136 	.set_msglevel	= lan78xx_set_msglevel,
2137 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
2138 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
2139 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
2140 	.get_ethtool_stats = lan78xx_get_stats,
2141 	.get_sset_count = lan78xx_get_sset_count,
2142 	.get_strings	= lan78xx_get_strings,
2143 	.get_wol	= lan78xx_get_wol,
2144 	.set_wol	= lan78xx_set_wol,
2145 	.get_ts_info	= ethtool_op_get_ts_info,
2146 	.get_eee	= lan78xx_get_eee,
2147 	.set_eee	= lan78xx_set_eee,
2148 	.get_pauseparam	= lan78xx_get_pause,
2149 	.set_pauseparam	= lan78xx_set_pause,
2150 	.get_link_ksettings = lan78xx_get_link_ksettings,
2151 	.set_link_ksettings = lan78xx_set_link_ksettings,
2152 	.get_regs_len	= lan78xx_get_regs_len,
2153 	.get_regs	= lan78xx_get_regs,
2154 };
2155 
2156 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
2157 {
2158 	u32 addr_lo, addr_hi;
2159 	u8 addr[6];
2160 
2161 	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
2162 	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
2163 
2164 	addr[0] = addr_lo & 0xFF;
2165 	addr[1] = (addr_lo >> 8) & 0xFF;
2166 	addr[2] = (addr_lo >> 16) & 0xFF;
2167 	addr[3] = (addr_lo >> 24) & 0xFF;
2168 	addr[4] = addr_hi & 0xFF;
2169 	addr[5] = (addr_hi >> 8) & 0xFF;
2170 
2171 	if (!is_valid_ether_addr(addr)) {
2172 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
2173 			/* valid address present in Device Tree */
2174 			netif_dbg(dev, ifup, dev->net,
2175 				  "MAC address read from Device Tree");
2176 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
2177 						 ETH_ALEN, addr) == 0) ||
2178 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
2179 					      ETH_ALEN, addr) == 0)) &&
2180 			   is_valid_ether_addr(addr)) {
2181 			/* eeprom values are valid so use them */
2182 			netif_dbg(dev, ifup, dev->net,
2183 				  "MAC address read from EEPROM");
2184 		} else {
2185 			/* generate random MAC */
2186 			eth_random_addr(addr);
2187 			netif_dbg(dev, ifup, dev->net,
2188 				  "MAC address set to random addr");
2189 		}
2190 
2191 		addr_lo = addr[0] | (addr[1] << 8) |
2192 			  (addr[2] << 16) | (addr[3] << 24);
2193 		addr_hi = addr[4] | (addr[5] << 8);
2194 
2195 		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2196 		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2197 	}
2198 
2199 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2200 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2201 
2202 	eth_hw_addr_set(dev->net, addr);
2203 }
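
/* Worked example of the packing above: for MAC 01:23:45:67:89:ab,
 * byte 0 lands in the least-significant byte of each register, so
 * RX_ADDRL = 0x67452301 and RX_ADDRH = 0x0000ab89.
 */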
2204 
2205 /* MDIO read and write wrappers for phylib */
2206 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
2207 {
2208 	struct lan78xx_net *dev = bus->priv;
2209 	u32 val, addr;
2210 	int ret;
2211 
2212 	ret = usb_autopm_get_interface(dev->intf);
2213 	if (ret < 0)
2214 		return ret;
2215 
2216 	mutex_lock(&dev->phy_mutex);
2217 
2218 	/* confirm MII not busy */
2219 	ret = lan78xx_phy_wait_not_busy(dev);
2220 	if (ret < 0)
2221 		goto done;
2222 
2223 	/* set the address, index & direction (read from PHY) */
2224 	addr = mii_access(phy_id, idx, MII_READ);
2225 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2226 	if (ret < 0)
2227 		goto done;
2228 
2229 	ret = lan78xx_phy_wait_not_busy(dev);
2230 	if (ret < 0)
2231 		goto done;
2232 
2233 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2234 	if (ret < 0)
2235 		goto done;
2236 
2237 	ret = (int)(val & 0xFFFF);
2238 
2239 done:
2240 	mutex_unlock(&dev->phy_mutex);
2241 	usb_autopm_put_interface(dev->intf);
2242 
2243 	return ret;
2244 }
2245 
2246 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2247 				 u16 regval)
2248 {
2249 	struct lan78xx_net *dev = bus->priv;
2250 	u32 val, addr;
2251 	int ret;
2252 
2253 	ret = usb_autopm_get_interface(dev->intf);
2254 	if (ret < 0)
2255 		return ret;
2256 
2257 	mutex_lock(&dev->phy_mutex);
2258 
2259 	/* confirm MII not busy */
2260 	ret = lan78xx_phy_wait_not_busy(dev);
2261 	if (ret < 0)
2262 		goto done;
2263 
2264 	val = (u32)regval;
2265 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2266 	if (ret < 0)
2267 		goto done;
2268 
2269 	/* set the address, index & direction (write to PHY) */
2270 	addr = mii_access(phy_id, idx, MII_WRITE);
2271 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2272 	if (ret < 0)
2273 		goto done;
2274 
2275 	ret = lan78xx_phy_wait_not_busy(dev);
2276 	if (ret < 0)
2277 		goto done;
2278 
2279 done:
2280 	mutex_unlock(&dev->phy_mutex);
2281 	usb_autopm_put_interface(dev->intf);
2282 	return ret;
2283 }
2284 
2285 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2286 {
2287 	struct device_node *node;
2288 	int ret;
2289 
2290 	dev->mdiobus = mdiobus_alloc();
2291 	if (!dev->mdiobus) {
2292 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2293 		return -ENOMEM;
2294 	}
2295 
2296 	dev->mdiobus->priv = (void *)dev;
2297 	dev->mdiobus->read = lan78xx_mdiobus_read;
2298 	dev->mdiobus->write = lan78xx_mdiobus_write;
2299 	dev->mdiobus->name = "lan78xx-mdiobus";
2300 	dev->mdiobus->parent = &dev->udev->dev;
2301 
2302 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2303 		 dev->udev->bus->busnum, dev->udev->devnum);
2304 
2305 	switch (dev->chipid) {
2306 	case ID_REV_CHIP_ID_7800_:
2307 	case ID_REV_CHIP_ID_7850_:
2308 		/* set to internal PHY id */
2309 		dev->mdiobus->phy_mask = ~(1 << 1);
2310 		break;
2311 	case ID_REV_CHIP_ID_7801_:
		/* scan through PHY addresses PHYAD[2..0] */
2313 		dev->mdiobus->phy_mask = ~(0xFF);
2314 		break;
2315 	}
2316 
2317 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2318 	ret = of_mdiobus_register(dev->mdiobus, node);
2319 	of_node_put(node);
2320 	if (ret) {
2321 		netdev_err(dev->net, "can't register MDIO bus\n");
2322 		goto exit1;
2323 	}
2324 
2325 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2326 	return 0;
2327 exit1:
2328 	mdiobus_free(dev->mdiobus);
2329 	return ret;
2330 }
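
/* Usage sketch (hypothetical): once the bus is registered, phylib (or any
 * other user) can reach PHY registers through the wrappers above, e.g.
 * reading BMSR from the internal PHY at address 1, the address exposed by
 * the LAN7800/LAN7850 phy_mask set in lan78xx_mdio_init().
 */
static int example_read_internal_bmsr(struct lan78xx_net *dev)
{
	return mdiobus_read(dev->mdiobus, 1, MII_BMSR);
}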
2331 
2332 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2333 {
2334 	mdiobus_unregister(dev->mdiobus);
2335 	mdiobus_free(dev->mdiobus);
2336 }
2337 
2338 static void lan78xx_link_status_change(struct net_device *net)
2339 {
2340 	struct lan78xx_net *dev = netdev_priv(net);
2341 	struct phy_device *phydev = net->phydev;
2342 	u32 data;
2343 	int ret;
2344 
2345 	ret = lan78xx_read_reg(dev, MAC_CR, &data);
2346 	if (ret < 0)
2347 		return;
2348 
2349 	if (phydev->enable_tx_lpi)
2350 		data |=  MAC_CR_EEE_EN_;
2351 	else
2352 		data &= ~MAC_CR_EEE_EN_;
2353 	lan78xx_write_reg(dev, MAC_CR, data);
2354 
2355 	phy_print_status(phydev);
2356 }
2357 
2358 static int irq_map(struct irq_domain *d, unsigned int irq,
2359 		   irq_hw_number_t hwirq)
2360 {
2361 	struct irq_domain_data *data = d->host_data;
2362 
2363 	irq_set_chip_data(irq, data);
2364 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2365 	irq_set_noprobe(irq);
2366 
2367 	return 0;
2368 }
2369 
2370 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2371 {
2372 	irq_set_chip_and_handler(irq, NULL, NULL);
2373 	irq_set_chip_data(irq, NULL);
2374 }
2375 
2376 static const struct irq_domain_ops chip_domain_ops = {
2377 	.map	= irq_map,
2378 	.unmap	= irq_unmap,
2379 };
2380 
2381 static void lan78xx_irq_mask(struct irq_data *irqd)
2382 {
2383 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2384 
2385 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2386 }
2387 
2388 static void lan78xx_irq_unmask(struct irq_data *irqd)
2389 {
2390 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2391 
2392 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2393 }
2394 
2395 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2396 {
2397 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2398 
2399 	mutex_lock(&data->irq_lock);
2400 }
2401 
2402 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2403 {
2404 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2405 	struct lan78xx_net *dev =
2406 			container_of(data, struct lan78xx_net, domain_data);
2407 	u32 buf;
2408 	int ret;
2409 
	/* Do the register access here: irq_bus_lock and irq_bus_sync_unlock
	 * are the only two callbacks executed in a non-atomic context.
	 */
2413 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2414 	if (ret < 0)
2415 		goto irq_bus_sync_unlock;
2416 
2417 	if (buf != data->irqenable)
2418 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2419 
2420 irq_bus_sync_unlock:
2421 	if (ret < 0)
2422 		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
2423 			   ERR_PTR(ret));
2424 
2425 	mutex_unlock(&data->irq_lock);
2426 }
2427 
2428 static struct irq_chip lan78xx_irqchip = {
2429 	.name			= "lan78xx-irqs",
2430 	.irq_mask		= lan78xx_irq_mask,
2431 	.irq_unmask		= lan78xx_irq_unmask,
2432 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2433 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2434 };
2435 
2436 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2437 {
2438 	struct device_node *of_node;
2439 	struct irq_domain *irqdomain;
2440 	unsigned int irqmap = 0;
2441 	u32 buf;
2442 	int ret = 0;
2443 
2444 	of_node = dev->udev->dev.parent->of_node;
2445 
2446 	mutex_init(&dev->domain_data.irq_lock);
2447 
2448 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2449 	dev->domain_data.irqenable = buf;
2450 
2451 	dev->domain_data.irqchip = &lan78xx_irqchip;
2452 	dev->domain_data.irq_handler = handle_simple_irq;
2453 
2454 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2455 					  &chip_domain_ops, &dev->domain_data);
2456 	if (irqdomain) {
2457 		/* create mapping for PHY interrupt */
2458 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2459 		if (!irqmap) {
2460 			irq_domain_remove(irqdomain);
2461 
2462 			irqdomain = NULL;
2463 			ret = -EINVAL;
2464 		}
2465 	} else {
2466 		ret = -EINVAL;
2467 	}
2468 
2469 	dev->domain_data.irqdomain = irqdomain;
2470 	dev->domain_data.phyirq = irqmap;
2471 
2472 	return ret;
2473 }
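
/* Sketch (hypothetical consumer): a handler requested on the mapped PHY
 * interrupt must be threaded, because masking/unmasking funnels through
 * the irq_bus_lock/irq_bus_sync_unlock callbacks above, which perform
 * sleeping USB register accesses.
 */
static int example_request_phy_irq(struct lan78xx_net *dev,
				   irq_handler_t thread_fn, void *ctx)
{
	return request_threaded_irq(dev->domain_data.phyirq, NULL, thread_fn,
				    IRQF_ONESHOT, "lan78xx-phy-example", ctx);
}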
2474 
2475 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2476 {
2477 	if (dev->domain_data.phyirq > 0) {
2478 		irq_dispose_mapping(dev->domain_data.phyirq);
2479 
2480 		if (dev->domain_data.irqdomain)
2481 			irq_domain_remove(dev->domain_data.irqdomain);
2482 	}
2483 	dev->domain_data.phyirq = 0;
2484 	dev->domain_data.irqdomain = NULL;
2485 }
2486 
2487 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2488 {
2489 	u32 buf;
2490 	int ret;
2491 	struct fixed_phy_status fphy_status = {
2492 		.link = 1,
2493 		.speed = SPEED_1000,
2494 		.duplex = DUPLEX_FULL,
2495 	};
2496 	struct phy_device *phydev;
2497 
2498 	phydev = phy_find_first(dev->mdiobus);
2499 	if (!phydev) {
2500 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2501 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2502 		if (IS_ERR(phydev)) {
2503 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2504 			return NULL;
2505 		}
2506 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2507 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2508 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2509 					MAC_RGMII_ID_TXC_DELAY_EN_);
2510 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2511 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2512 		buf |= HW_CFG_CLK125_EN_;
2513 		buf |= HW_CFG_REFCLK25_EN_;
2514 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2515 	} else {
2516 		if (!phydev->drv) {
2517 			netdev_err(dev->net, "no PHY driver found\n");
2518 			return NULL;
2519 		}
2520 		dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2521 		/* The PHY driver is responsible to configure proper RGMII
2522 		 * interface delays. Disable RGMII delays on MAC side.
2523 		 */
2524 		lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2525 
2526 		phydev->is_internal = false;
2527 	}
2528 	return phydev;
2529 }
2530 
2531 static int lan78xx_phy_init(struct lan78xx_net *dev)
2532 {
2533 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2534 	int ret;
2535 	u32 mii_adv;
2536 	struct phy_device *phydev;
2537 
2538 	switch (dev->chipid) {
2539 	case ID_REV_CHIP_ID_7801_:
2540 		phydev = lan7801_phy_init(dev);
2541 		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY init failed\n");
2543 			return -EIO;
2544 		}
2545 		break;
2546 
2547 	case ID_REV_CHIP_ID_7800_:
2548 	case ID_REV_CHIP_ID_7850_:
2549 		phydev = phy_find_first(dev->mdiobus);
2550 		if (!phydev) {
2551 			netdev_err(dev->net, "no PHY found\n");
2552 			return -EIO;
2553 		}
2554 		phydev->is_internal = true;
2555 		dev->interface = PHY_INTERFACE_MODE_GMII;
2556 		break;
2557 
2558 	default:
2559 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2560 		return -EIO;
2561 	}
2562 
2563 	/* if phyirq is not set, use polling mode in phylib */
2564 	if (dev->domain_data.phyirq > 0)
2565 		phydev->irq = dev->domain_data.phyirq;
2566 	else
2567 		phydev->irq = PHY_POLL;
2568 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2569 
2570 	/* set to AUTOMDIX */
2571 	phydev->mdix = ETH_TP_MDI_AUTO;
2572 
2573 	ret = phy_connect_direct(dev->net, phydev,
2574 				 lan78xx_link_status_change,
2575 				 dev->interface);
2576 	if (ret) {
2577 		netdev_err(dev->net, "can't attach PHY to %s\n",
2578 			   dev->mdiobus->id);
2579 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2580 			if (phy_is_pseudo_fixed_link(phydev)) {
2581 				fixed_phy_unregister(phydev);
2582 				phy_device_free(phydev);
2583 			}
2584 		}
2585 		return -EIO;
2586 	}
2587 
2588 	/* MAC doesn't support 1000T Half */
2589 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2590 
2591 	/* support both flow controls */
2592 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2593 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2594 			   phydev->advertising);
2595 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2596 			   phydev->advertising);
2597 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2598 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2599 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2600 
2601 	phy_support_eee(phydev);
2602 
2603 	if (phydev->mdio.dev.of_node) {
2604 		u32 reg;
2605 		int len;
2606 
2607 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2608 						      "microchip,led-modes",
2609 						      sizeof(u32));
2610 		if (len >= 0) {
2611 			/* Ensure the appropriate LEDs are enabled */
2612 			lan78xx_read_reg(dev, HW_CFG, &reg);
2613 			reg &= ~(HW_CFG_LED0_EN_ |
2614 				 HW_CFG_LED1_EN_ |
2615 				 HW_CFG_LED2_EN_ |
2616 				 HW_CFG_LED3_EN_);
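			/* (len > n) evaluates to 1 or 0, so each product
			 * below contributes HW_CFG_LEDn_EN_ exactly when
			 * LED n has an entry in "microchip,led-modes"
			 */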
2617 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2618 				(len > 1) * HW_CFG_LED1_EN_ |
2619 				(len > 2) * HW_CFG_LED2_EN_ |
2620 				(len > 3) * HW_CFG_LED3_EN_;
2621 			lan78xx_write_reg(dev, HW_CFG, reg);
2622 		}
2623 	}
2624 
2625 	genphy_config_aneg(phydev);
2626 
2627 	dev->fc_autoneg = phydev->autoneg;
2628 
2629 	return 0;
2630 }
2631 
2632 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2633 {
2634 	bool rxenabled;
2635 	u32 buf;
2636 	int ret;
2637 
2638 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2639 	if (ret < 0)
2640 		return ret;
2641 
2642 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2643 
2644 	if (rxenabled) {
2645 		buf &= ~MAC_RX_RXEN_;
2646 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2647 		if (ret < 0)
2648 			return ret;
2649 	}
2650 
2651 	/* add 4 to size for FCS */
2652 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2653 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2654 
2655 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2656 	if (ret < 0)
2657 		return ret;
2658 
2659 	if (rxenabled) {
2660 		buf |= MAC_RX_RXEN_;
2661 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2662 		if (ret < 0)
2663 			return ret;
2664 	}
2665 
2666 	return 0;
2667 }
2668 
2669 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2670 {
2671 	struct sk_buff *skb;
2672 	unsigned long flags;
2673 	int count = 0;
2674 
2675 	spin_lock_irqsave(&q->lock, flags);
2676 	while (!skb_queue_empty(q)) {
2677 		struct skb_data	*entry;
2678 		struct urb *urb;
2679 		int ret;
2680 
2681 		skb_queue_walk(q, skb) {
2682 			entry = (struct skb_data *)skb->cb;
2683 			if (entry->state != unlink_start)
2684 				goto found;
2685 		}
2686 		break;
2687 found:
2688 		entry->state = unlink_start;
2689 		urb = entry->urb;
2690 
		/* Take a reference on the URB so that it cannot be
		 * freed while usb_unlink_urb() runs; without this,
		 * usb_unlink_urb() could trigger a use-after-free,
		 * since it always races with the .complete handler
		 * (including defer_bh).
		 */
2697 		usb_get_urb(urb);
2698 		spin_unlock_irqrestore(&q->lock, flags);
2699 		/* during some PM-driven resume scenarios,
2700 		 * these (async) unlinks complete immediately
2701 		 */
2702 		ret = usb_unlink_urb(urb);
2703 		if (ret != -EINPROGRESS && ret != 0)
2704 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2705 		else
2706 			count++;
2707 		usb_put_urb(urb);
2708 		spin_lock_irqsave(&q->lock, flags);
2709 	}
2710 	spin_unlock_irqrestore(&q->lock, flags);
2711 	return count;
2712 }
2713 
2714 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2715 {
2716 	struct lan78xx_net *dev = netdev_priv(netdev);
2717 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2718 	int ret;
2719 
	/* No second zero-length packet read wanted after mtu-sized packets:
	 * a frame length that is an exact multiple of the endpoint's max
	 * packet size would need an extra zero-length packet to terminate
	 * the bulk transfer, so reject such MTUs.
	 */
	if ((max_frame_len % dev->maxpacket) == 0)
		return -EDOM;
2723 
2724 	ret = usb_autopm_get_interface(dev->intf);
2725 	if (ret < 0)
2726 		return ret;
2727 
2728 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2729 	if (ret < 0)
2730 		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2731 			   new_mtu, netdev->mtu, ERR_PTR(ret));
2732 	else
2733 		WRITE_ONCE(netdev->mtu, new_mtu);
2734 
2735 	usb_autopm_put_interface(dev->intf);
2736 
2737 	return ret;
2738 }
2739 
2740 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2741 {
2742 	struct lan78xx_net *dev = netdev_priv(netdev);
2743 	struct sockaddr *addr = p;
2744 	u32 addr_lo, addr_hi;
2745 
2746 	if (netif_running(netdev))
2747 		return -EBUSY;
2748 
2749 	if (!is_valid_ether_addr(addr->sa_data))
2750 		return -EADDRNOTAVAIL;
2751 
2752 	eth_hw_addr_set(netdev, addr->sa_data);
2753 
2754 	addr_lo = netdev->dev_addr[0] |
2755 		  netdev->dev_addr[1] << 8 |
2756 		  netdev->dev_addr[2] << 16 |
2757 		  netdev->dev_addr[3] << 24;
2758 	addr_hi = netdev->dev_addr[4] |
2759 		  netdev->dev_addr[5] << 8;
2760 
2761 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2762 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2763 
2764 	/* Added to support MAC address changes */
2765 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2766 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2767 
2768 	return 0;
2769 }
2770 
2771 /* Enable or disable Rx checksum offload engine */
2772 static int lan78xx_set_features(struct net_device *netdev,
2773 				netdev_features_t features)
2774 {
2775 	struct lan78xx_net *dev = netdev_priv(netdev);
2776 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2777 	unsigned long flags;
2778 
2779 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2780 
2781 	if (features & NETIF_F_RXCSUM) {
2782 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2783 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2784 	} else {
2785 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2786 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2787 	}
2788 
2789 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2790 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2791 	else
2792 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2793 
2794 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2795 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2796 	else
2797 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2798 
2799 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2800 
2801 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2802 
2803 	return 0;
2804 }
2805 
2806 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2807 {
2808 	struct lan78xx_priv *pdata =
2809 			container_of(param, struct lan78xx_priv, set_vlan);
2810 	struct lan78xx_net *dev = pdata->dev;
2811 
2812 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2813 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2814 }
2815 
2816 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2817 				   __be16 proto, u16 vid)
2818 {
2819 	struct lan78xx_net *dev = netdev_priv(netdev);
2820 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2821 	u16 vid_bit_index;
2822 	u16 vid_dword_index;
2823 
2824 	vid_dword_index = (vid >> 5) & 0x7F;
2825 	vid_bit_index = vid & 0x1F;
2826 
2827 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2828 
2829 	/* defer register writes to a sleepable context */
2830 	schedule_work(&pdata->set_vlan);
2831 
2832 	return 0;
2833 }
2834 
2835 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2836 				    __be16 proto, u16 vid)
2837 {
2838 	struct lan78xx_net *dev = netdev_priv(netdev);
2839 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2840 	u16 vid_bit_index;
2841 	u16 vid_dword_index;
2842 
2843 	vid_dword_index = (vid >> 5) & 0x7F;
2844 	vid_bit_index = vid & 0x1F;
2845 
2846 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2847 
2848 	/* defer register writes to a sleepable context */
2849 	schedule_work(&pdata->set_vlan);
2850 
2851 	return 0;
2852 }
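
/* Worked example of the bitmap arithmetic above: VID 100 gives
 * vid_dword_index = 100 >> 5 = 3 and vid_bit_index = 100 & 0x1F = 4,
 * so adding VID 100 sets bit 4 of vlan_table[3]; 128 dwords * 32 bits
 * cover the full 4096-entry VLAN ID space.
 */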
2853 
2854 static int lan78xx_init_ltm(struct lan78xx_net *dev)
2855 {
2856 	u32 regs[6] = { 0 };
2857 	int ret;
2858 	u32 buf;
2859 
2860 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2861 	if (ret < 0)
2862 		goto init_ltm_failed;
2863 
2864 	if (buf & USB_CFG1_LTM_ENABLE_) {
2865 		u8 temp[2];
2866 		/* Get values from EEPROM first */
2867 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2868 			if (temp[0] == 24) {
2869 				ret = lan78xx_read_raw_eeprom(dev,
2870 							      temp[1] * 2,
2871 							      24,
2872 							      (u8 *)regs);
2873 				if (ret < 0)
2874 					return ret;
2875 			}
2876 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2877 			if (temp[0] == 24) {
2878 				ret = lan78xx_read_raw_otp(dev,
2879 							   temp[1] * 2,
2880 							   24,
2881 							   (u8 *)regs);
2882 				if (ret < 0)
2883 					return ret;
2884 			}
2885 		}
2886 	}
2887 
2888 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2889 	if (ret < 0)
2890 		goto init_ltm_failed;
2891 
2892 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2893 	if (ret < 0)
2894 		goto init_ltm_failed;
2895 
2896 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2897 	if (ret < 0)
2898 		goto init_ltm_failed;
2899 
2900 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2901 	if (ret < 0)
2902 		goto init_ltm_failed;
2903 
2904 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2905 	if (ret < 0)
2906 		goto init_ltm_failed;
2907 
2908 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2909 	if (ret < 0)
2910 		goto init_ltm_failed;
2911 
2912 	return 0;
2913 
2914 init_ltm_failed:
2915 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
2916 	return ret;
2917 }
2918 
2919 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2920 {
2921 	int result = 0;
2922 
2923 	switch (dev->udev->speed) {
2924 	case USB_SPEED_SUPER:
2925 		dev->rx_urb_size = RX_SS_URB_SIZE;
2926 		dev->tx_urb_size = TX_SS_URB_SIZE;
2927 		dev->n_rx_urbs = RX_SS_URB_NUM;
2928 		dev->n_tx_urbs = TX_SS_URB_NUM;
2929 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2930 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2931 		break;
2932 	case USB_SPEED_HIGH:
2933 		dev->rx_urb_size = RX_HS_URB_SIZE;
2934 		dev->tx_urb_size = TX_HS_URB_SIZE;
2935 		dev->n_rx_urbs = RX_HS_URB_NUM;
2936 		dev->n_tx_urbs = TX_HS_URB_NUM;
2937 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2938 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2939 		break;
2940 	case USB_SPEED_FULL:
2941 		dev->rx_urb_size = RX_FS_URB_SIZE;
2942 		dev->tx_urb_size = TX_FS_URB_SIZE;
2943 		dev->n_rx_urbs = RX_FS_URB_NUM;
2944 		dev->n_tx_urbs = TX_FS_URB_NUM;
2945 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2946 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2947 		break;
2948 	default:
2949 		netdev_warn(dev->net, "USB bus speed not supported\n");
2950 		result = -EIO;
2951 		break;
2952 	}
2953 
2954 	return result;
2955 }
2956 
2957 static int lan78xx_reset(struct lan78xx_net *dev)
2958 {
2959 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2960 	unsigned long timeout;
2961 	int ret;
2962 	u32 buf;
2963 	u8 sig;
2964 
2965 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2966 	if (ret < 0)
2967 		return ret;
2968 
2969 	buf |= HW_CFG_LRST_;
2970 
2971 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2972 	if (ret < 0)
2973 		return ret;
2974 
2975 	timeout = jiffies + HZ;
2976 	do {
2977 		mdelay(1);
2978 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2979 		if (ret < 0)
2980 			return ret;
2981 
2982 		if (time_after(jiffies, timeout)) {
2983 			netdev_warn(dev->net,
2984 				    "timeout on completion of LiteReset");
2985 			ret = -ETIMEDOUT;
2986 			return ret;
2987 		}
2988 	} while (buf & HW_CFG_LRST_);
2989 
2990 	lan78xx_init_mac_address(dev);
2991 
2992 	/* save DEVID for later usage */
2993 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2994 	if (ret < 0)
2995 		return ret;
2996 
2997 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2998 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2999 
3000 	/* Respond to the IN token with a NAK */
3001 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3002 	if (ret < 0)
3003 		return ret;
3004 
3005 	buf |= USB_CFG_BIR_;
3006 
3007 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3008 	if (ret < 0)
3009 		return ret;
3010 
3011 	/* Init LTM */
3012 	ret = lan78xx_init_ltm(dev);
3013 	if (ret < 0)
3014 		return ret;
3015 
3016 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
3017 	if (ret < 0)
3018 		return ret;
3019 
3020 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
3021 	if (ret < 0)
3022 		return ret;
3023 
3024 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3025 	if (ret < 0)
3026 		return ret;
3027 
3028 	buf |= HW_CFG_MEF_;
3029 	buf |= HW_CFG_CLK125_EN_;
3030 	buf |= HW_CFG_REFCLK25_EN_;
3031 
3032 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3033 	if (ret < 0)
3034 		return ret;
3035 
3036 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3037 	if (ret < 0)
3038 		return ret;
3039 
3040 	buf |= USB_CFG_BCE_;
3041 
3042 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3043 	if (ret < 0)
3044 		return ret;
3045 
3046 	/* set FIFO sizes */
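	/* i.e. (12 * 1024 - 512) / 512 = 23 for both 12 KiB FIFOs below */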
3047 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
3048 
3049 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
3050 	if (ret < 0)
3051 		return ret;
3052 
3053 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
3054 
3055 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
3056 	if (ret < 0)
3057 		return ret;
3058 
3059 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3060 	if (ret < 0)
3061 		return ret;
3062 
3063 	ret = lan78xx_write_reg(dev, FLOW, 0);
3064 	if (ret < 0)
3065 		return ret;
3066 
3067 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3068 	if (ret < 0)
3069 		return ret;
3070 
3071 	/* Don't need rfe_ctl_lock during initialisation */
3072 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3073 	if (ret < 0)
3074 		return ret;
3075 
3076 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3077 
3078 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3079 	if (ret < 0)
3080 		return ret;
3081 
3082 	/* Enable or disable checksum offload engines */
3083 	ret = lan78xx_set_features(dev->net, dev->net->features);
3084 	if (ret < 0)
3085 		return ret;
3086 
3087 	lan78xx_set_multicast(dev->net);
3088 
3089 	/* reset PHY */
3090 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3091 	if (ret < 0)
3092 		return ret;
3093 
3094 	buf |= PMT_CTL_PHY_RST_;
3095 
3096 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3097 	if (ret < 0)
3098 		return ret;
3099 
3100 	timeout = jiffies + HZ;
3101 	do {
3102 		mdelay(1);
3103 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3104 		if (ret < 0)
3105 			return ret;
3106 
3107 		if (time_after(jiffies, timeout)) {
3108 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3109 			ret = -ETIMEDOUT;
3110 			return ret;
3111 		}
3112 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3113 
3114 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3115 	if (ret < 0)
3116 		return ret;
3117 
3118 	/* LAN7801 only has RGMII mode */
3119 	if (dev->chipid == ID_REV_CHIP_ID_7801_) {
3120 		buf &= ~MAC_CR_GMII_EN_;
3121 		/* Enable Auto Duplex and Auto speed */
3122 		buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3123 	}
3124 
3125 	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
3126 	    dev->chipid == ID_REV_CHIP_ID_7850_) {
3127 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
3128 		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external EEPROM. Set MAC speed. */
3130 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
3131 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3132 		}
3133 	}
3134 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3135 	if (ret < 0)
3136 		return ret;
3137 
3138 	ret = lan78xx_set_rx_max_frame_length(dev,
3139 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3140 
3141 	return ret;
3142 }
3143 
3144 static void lan78xx_init_stats(struct lan78xx_net *dev)
3145 {
3146 	u32 *p;
3147 	int i;
3148 
	/* Initialize the rollover maxima for the stats update:
	 * some hardware counters are 20 bits wide and some are 32 bits.
	 */
3152 	p = (u32 *)&dev->stats.rollover_max;
3153 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3154 		p[i] = 0xFFFFF;
3155 
3156 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3157 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3158 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3159 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3160 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3161 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3162 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3163 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3164 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3165 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3166 
3167 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3168 }
3169 
3170 static int lan78xx_open(struct net_device *net)
3171 {
3172 	struct lan78xx_net *dev = netdev_priv(net);
3173 	int ret;
3174 
3175 	netif_dbg(dev, ifup, dev->net, "open device");
3176 
3177 	ret = usb_autopm_get_interface(dev->intf);
3178 	if (ret < 0)
3179 		return ret;
3180 
3181 	mutex_lock(&dev->dev_mutex);
3182 
3183 	phy_start(net->phydev);
3184 
3185 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
3186 
3187 	/* for Link Check */
3188 	if (dev->urb_intr) {
3189 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3190 		if (ret < 0) {
3191 			netif_err(dev, ifup, dev->net,
3192 				  "intr submit %d\n", ret);
3193 			goto done;
3194 		}
3195 	}
3196 
3197 	ret = lan78xx_flush_rx_fifo(dev);
3198 	if (ret < 0)
3199 		goto done;
3200 	ret = lan78xx_flush_tx_fifo(dev);
3201 	if (ret < 0)
3202 		goto done;
3203 
3204 	ret = lan78xx_start_tx_path(dev);
3205 	if (ret < 0)
3206 		goto done;
3207 	ret = lan78xx_start_rx_path(dev);
3208 	if (ret < 0)
3209 		goto done;
3210 
3211 	lan78xx_init_stats(dev);
3212 
3213 	set_bit(EVENT_DEV_OPEN, &dev->flags);
3214 
3215 	netif_start_queue(net);
3216 
3217 	dev->link_on = false;
3218 
3219 	napi_enable(&dev->napi);
3220 
3221 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
3222 done:
3223 	mutex_unlock(&dev->dev_mutex);
3224 
3225 	if (ret < 0)
3226 		usb_autopm_put_interface(dev->intf);
3227 
3228 	return ret;
3229 }
3230 
3231 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3232 {
3233 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3234 	DECLARE_WAITQUEUE(wait, current);
3235 	int temp;
3236 
3237 	/* ensure there are no more active urbs */
3238 	add_wait_queue(&unlink_wakeup, &wait);
3239 	set_current_state(TASK_UNINTERRUPTIBLE);
3240 	dev->wait = &unlink_wakeup;
3241 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3242 
3243 	/* maybe wait for deletions to finish. */
3244 	while (!skb_queue_empty(&dev->rxq) ||
3245 	       !skb_queue_empty(&dev->txq)) {
3246 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3247 		set_current_state(TASK_UNINTERRUPTIBLE);
3248 		netif_dbg(dev, ifdown, dev->net,
3249 			  "waited for %d urb completions", temp);
3250 	}
3251 	set_current_state(TASK_RUNNING);
3252 	dev->wait = NULL;
3253 	remove_wait_queue(&unlink_wakeup, &wait);
3254 
3255 	/* empty Rx done, Rx overflow and Tx pend queues
3256 	 */
3257 	while (!skb_queue_empty(&dev->rxq_done)) {
3258 		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3259 
3260 		lan78xx_release_rx_buf(dev, skb);
3261 	}
3262 
3263 	skb_queue_purge(&dev->rxq_overflow);
3264 	skb_queue_purge(&dev->txq_pend);
3265 }
3266 
3267 static int lan78xx_stop(struct net_device *net)
3268 {
3269 	struct lan78xx_net *dev = netdev_priv(net);
3270 
3271 	netif_dbg(dev, ifup, dev->net, "stop device");
3272 
3273 	mutex_lock(&dev->dev_mutex);
3274 
3275 	if (timer_pending(&dev->stat_monitor))
3276 		del_timer_sync(&dev->stat_monitor);
3277 
3278 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3279 	netif_stop_queue(net);
3280 	napi_disable(&dev->napi);
3281 
3282 	lan78xx_terminate_urbs(dev);
3283 
3284 	netif_info(dev, ifdown, dev->net,
3285 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3286 		   net->stats.rx_packets, net->stats.tx_packets,
3287 		   net->stats.rx_errors, net->stats.tx_errors);
3288 
3289 	/* ignore errors that occur stopping the Tx and Rx data paths */
3290 	lan78xx_stop_tx_path(dev);
3291 	lan78xx_stop_rx_path(dev);
3292 
3293 	if (net->phydev)
3294 		phy_stop(net->phydev);
3295 
3296 	usb_kill_urb(dev->urb_intr);
3297 
	/* Deferred work (task, timer, softirq) must also stop. We can't
	 * flush_scheduled_work() until we drop rtnl (later), else the
	 * workers could deadlock; so make the workers a NOP instead.
	 */
3302 	clear_bit(EVENT_TX_HALT, &dev->flags);
3303 	clear_bit(EVENT_RX_HALT, &dev->flags);
3304 	clear_bit(EVENT_LINK_RESET, &dev->flags);
3305 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3306 
3307 	cancel_delayed_work_sync(&dev->wq);
3308 
3309 	usb_autopm_put_interface(dev->intf);
3310 
3311 	mutex_unlock(&dev->dev_mutex);
3312 
3313 	return 0;
3314 }
3315 
3316 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3317 			       struct sk_buff_head *list, enum skb_state state)
3318 {
3319 	unsigned long flags;
3320 	enum skb_state old_state;
3321 	struct skb_data *entry = (struct skb_data *)skb->cb;
3322 
3323 	spin_lock_irqsave(&list->lock, flags);
3324 	old_state = entry->state;
3325 	entry->state = state;
3326 
3327 	__skb_unlink(skb, list);
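	/* swap locks while keeping interrupts disabled: drop list->lock
	 * without restoring flags, take rxq_done.lock, and only restore
	 * the flags saved above once the skb is queued for NAPI
	 */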
3328 	spin_unlock(&list->lock);
3329 	spin_lock(&dev->rxq_done.lock);
3330 
3331 	__skb_queue_tail(&dev->rxq_done, skb);
3332 	if (skb_queue_len(&dev->rxq_done) == 1)
3333 		napi_schedule(&dev->napi);
3334 
3335 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3336 
3337 	return old_state;
3338 }
3339 
3340 static void tx_complete(struct urb *urb)
3341 {
3342 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3343 	struct skb_data *entry = (struct skb_data *)skb->cb;
3344 	struct lan78xx_net *dev = entry->dev;
3345 
3346 	if (urb->status == 0) {
3347 		dev->net->stats.tx_packets += entry->num_of_packet;
3348 		dev->net->stats.tx_bytes += entry->length;
3349 	} else {
3350 		dev->net->stats.tx_errors += entry->num_of_packet;
3351 
3352 		switch (urb->status) {
3353 		case -EPIPE:
3354 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3355 			break;
3356 
3357 		/* software-driven interface shutdown */
3358 		case -ECONNRESET:
3359 		case -ESHUTDOWN:
3360 			netif_dbg(dev, tx_err, dev->net,
3361 				  "tx err interface gone %d\n",
3362 				  entry->urb->status);
3363 			break;
3364 
3365 		case -EPROTO:
3366 		case -ETIME:
3367 		case -EILSEQ:
3368 			netif_stop_queue(dev->net);
3369 			netif_dbg(dev, tx_err, dev->net,
3370 				  "tx err queue stopped %d\n",
3371 				  entry->urb->status);
3372 			break;
3373 		default:
3374 			netif_dbg(dev, tx_err, dev->net,
3375 				  "unknown tx err %d\n",
3376 				  entry->urb->status);
3377 			break;
3378 		}
3379 	}
3380 
3381 	usb_autopm_put_interface_async(dev->intf);
3382 
3383 	skb_unlink(skb, &dev->txq);
3384 
3385 	lan78xx_release_tx_buf(dev, skb);
3386 
3387 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3388 	 */
3389 	if (skb_queue_empty(&dev->txq) &&
3390 	    !skb_queue_empty(&dev->txq_pend))
3391 		napi_schedule(&dev->napi);
3392 }
3393 
3394 static void lan78xx_queue_skb(struct sk_buff_head *list,
3395 			      struct sk_buff *newsk, enum skb_state state)
3396 {
3397 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3398 
3399 	__skb_queue_tail(list, newsk);
3400 	entry->state = state;
3401 }
3402 
3403 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3404 {
3405 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3406 }
3407 
3408 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3409 {
3410 	return dev->tx_pend_data_len;
3411 }
3412 
3413 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3414 				    struct sk_buff *skb,
3415 				    unsigned int *tx_pend_data_len)
3416 {
3417 	unsigned long flags;
3418 
3419 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3420 
3421 	__skb_queue_tail(&dev->txq_pend, skb);
3422 
3423 	dev->tx_pend_data_len += skb->len;
3424 	*tx_pend_data_len = dev->tx_pend_data_len;
3425 
3426 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3427 }
3428 
3429 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3430 					 struct sk_buff *skb,
3431 					 unsigned int *tx_pend_data_len)
3432 {
3433 	unsigned long flags;
3434 
3435 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3436 
3437 	__skb_queue_head(&dev->txq_pend, skb);
3438 
3439 	dev->tx_pend_data_len += skb->len;
3440 	*tx_pend_data_len = dev->tx_pend_data_len;
3441 
3442 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3443 }
3444 
3445 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3446 				    struct sk_buff **skb,
3447 				    unsigned int *tx_pend_data_len)
3448 {
3449 	unsigned long flags;
3450 
3451 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3452 
3453 	*skb = __skb_dequeue(&dev->txq_pend);
3454 	if (*skb)
3455 		dev->tx_pend_data_len -= (*skb)->len;
3456 	*tx_pend_data_len = dev->tx_pend_data_len;
3457 
3458 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3459 }
3460 
3461 static netdev_tx_t
3462 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3463 {
3464 	struct lan78xx_net *dev = netdev_priv(net);
3465 	unsigned int tx_pend_data_len;
3466 
3467 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3468 		schedule_delayed_work(&dev->wq, 0);
3469 
3470 	skb_tx_timestamp(skb);
3471 
3472 	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3473 
3474 	/* Set up a Tx URB if none is in progress */
3475 
3476 	if (skb_queue_empty(&dev->txq))
3477 		napi_schedule(&dev->napi);
3478 
3479 	/* Stop stack Tx queue if we have enough data to fill
3480 	 * all the free Tx URBs.
3481 	 */
3482 	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3483 		netif_stop_queue(net);
3484 
3485 		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3486 			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3487 
3488 		/* Kick off transmission of pending data */
3489 
3490 		if (!skb_queue_empty(&dev->txq_free))
3491 			napi_schedule(&dev->napi);
3492 	}
3493 
3494 	return NETDEV_TX_OK;
3495 }
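
/* Tx path summary (for orientation, mirroring the code above and below):
 * ndo_start_xmit() only queues the skb on txq_pend and kicks NAPI; the
 * NAPI handler batches pending skbs into large URB buffers
 * (lan78xx_tx_buf_fill()) and submits them (lan78xx_tx_bh()); tx_complete()
 * then releases the URB buffer and reschedules NAPI if data is pending.
 */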
3496 
3497 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3498 {
3499 	struct lan78xx_priv *pdata = NULL;
3500 	int ret;
3501 	int i;
3502 
3503 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3504 
3505 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3506 	if (!pdata) {
3507 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3508 		return -ENOMEM;
3509 	}
3510 
3511 	pdata->dev = dev;
3512 
3513 	spin_lock_init(&pdata->rfe_ctl_lock);
3514 	mutex_init(&pdata->dataport_mutex);
3515 
3516 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3517 
3518 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3519 		pdata->vlan_table[i] = 0;
3520 
3521 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3522 
3523 	dev->net->features = 0;
3524 
3525 	if (DEFAULT_TX_CSUM_ENABLE)
3526 		dev->net->features |= NETIF_F_HW_CSUM;
3527 
3528 	if (DEFAULT_RX_CSUM_ENABLE)
3529 		dev->net->features |= NETIF_F_RXCSUM;
3530 
3531 	if (DEFAULT_TSO_CSUM_ENABLE)
3532 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3533 
3534 	if (DEFAULT_VLAN_RX_OFFLOAD)
3535 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3536 
3537 	if (DEFAULT_VLAN_FILTER_ENABLE)
3538 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3539 
3540 	dev->net->hw_features = dev->net->features;
3541 
3542 	ret = lan78xx_setup_irq_domain(dev);
3543 	if (ret < 0) {
3544 		netdev_warn(dev->net,
3545 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3546 		goto out1;
3547 	}
3548 
3549 	/* Init all registers */
3550 	ret = lan78xx_reset(dev);
3551 	if (ret) {
3552 		netdev_warn(dev->net, "Registers INIT FAILED....");
3553 		goto out2;
3554 	}
3555 
3556 	ret = lan78xx_mdio_init(dev);
3557 	if (ret) {
3558 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3559 		goto out2;
3560 	}
3561 
3562 	dev->net->flags |= IFF_MULTICAST;
3563 
3564 	pdata->wol = WAKE_MAGIC;
3565 
3566 	return ret;
3567 
3568 out2:
3569 	lan78xx_remove_irq_domain(dev);
3570 
3571 out1:
3572 	netdev_warn(dev->net, "Bind routine FAILED");
3573 	cancel_work_sync(&pdata->set_multicast);
3574 	cancel_work_sync(&pdata->set_vlan);
3575 	kfree(pdata);
3576 	return ret;
3577 }
3578 
3579 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3580 {
3581 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3582 
3583 	lan78xx_remove_irq_domain(dev);
3584 
3585 	lan78xx_remove_mdio(dev);
3586 
3587 	if (pdata) {
3588 		cancel_work_sync(&pdata->set_multicast);
3589 		cancel_work_sync(&pdata->set_vlan);
3590 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3591 		kfree(pdata);
3592 		pdata = NULL;
3593 		dev->data[0] = 0;
3594 	}
3595 }
3596 
3597 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3598 				    struct sk_buff *skb,
3599 				    u32 rx_cmd_a, u32 rx_cmd_b)
3600 {
3601 	/* HW Checksum offload appears to be flawed if used when not stripping
3602 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3603 	 */
3604 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3605 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3606 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3607 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3608 		skb->ip_summed = CHECKSUM_NONE;
3609 	} else {
3610 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3611 		skb->ip_summed = CHECKSUM_COMPLETE;
3612 	}
3613 }
3614 
3615 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3616 				    struct sk_buff *skb,
3617 				    u32 rx_cmd_a, u32 rx_cmd_b)
3618 {
3619 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3620 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3621 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3622 				       (rx_cmd_b & 0xffff));
3623 }
3624 
3625 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3626 {
3627 	dev->net->stats.rx_packets++;
3628 	dev->net->stats.rx_bytes += skb->len;
3629 
3630 	skb->protocol = eth_type_trans(skb, dev->net);
3631 
3632 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3633 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3634 	memset(skb->cb, 0, sizeof(struct skb_data));
3635 
3636 	if (skb_defer_rx_timestamp(skb))
3637 		return;
3638 
3639 	napi_gro_receive(&dev->napi, skb);
3640 }
3641 
3642 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3643 		      int budget, int *work_done)
3644 {
3645 	if (skb->len < RX_SKB_MIN_LEN)
3646 		return 0;
3647 
3648 	/* Extract frames from the URB buffer and pass each one to
3649 	 * the stack in a new NAPI SKB.
3650 	 */
3651 	while (skb->len > 0) {
3652 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3653 		u16 rx_cmd_c;
3654 		unsigned char *packet;
3655 
3656 		rx_cmd_a = get_unaligned_le32(skb->data);
3657 		skb_pull(skb, sizeof(rx_cmd_a));
3658 
3659 		rx_cmd_b = get_unaligned_le32(skb->data);
3660 		skb_pull(skb, sizeof(rx_cmd_b));
3661 
3662 		rx_cmd_c = get_unaligned_le16(skb->data);
3663 		skb_pull(skb, sizeof(rx_cmd_c));
3664 
3665 		packet = skb->data;
3666 
3667 		/* get the packet length */
3668 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
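		/* e.g. size = 60: (60 + 2) % 4 = 2, so align_count below
		 * is 2 and the next frame starts on a 4-byte boundary
		 */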
3669 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3670 
3671 		if (unlikely(size > skb->len)) {
3672 			netif_dbg(dev, rx_err, dev->net,
3673 				  "size err rx_cmd_a=0x%08x\n",
3674 				  rx_cmd_a);
3675 			return 0;
3676 		}
3677 
3678 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3679 			netif_dbg(dev, rx_err, dev->net,
3680 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3681 		} else {
3682 			u32 frame_len;
3683 			struct sk_buff *skb2;
3684 
3685 			if (unlikely(size < ETH_FCS_LEN)) {
3686 				netif_dbg(dev, rx_err, dev->net,
3687 					  "size err rx_cmd_a=0x%08x\n",
3688 					  rx_cmd_a);
3689 				return 0;
3690 			}
3691 
3692 			frame_len = size - ETH_FCS_LEN;
3693 
3694 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3695 			if (!skb2)
3696 				return 0;
3697 
3698 			memcpy(skb2->data, packet, frame_len);
3699 
3700 			skb_put(skb2, frame_len);
3701 
3702 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3703 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3704 
3705 			/* Processing of the URB buffer must complete once
3706 			 * it has started. If the NAPI work budget is exhausted
3707 			 * while frames remain they are added to the overflow
3708 			 * queue for delivery in the next NAPI polling cycle.
3709 			 */
3710 			if (*work_done < budget) {
3711 				lan78xx_skb_return(dev, skb2);
3712 				++(*work_done);
3713 			} else {
3714 				skb_queue_tail(&dev->rxq_overflow, skb2);
3715 			}
3716 		}
3717 
3718 		skb_pull(skb, size);
3719 
3720 		/* skip padding bytes before the next frame starts */
3721 		if (skb->len)
3722 			skb_pull(skb, align_count);
3723 	}
3724 
3725 	return 1;
3726 }
3727 
3728 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3729 			      int budget, int *work_done)
3730 {
3731 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3732 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3733 		dev->net->stats.rx_errors++;
3734 	}
3735 }
3736 
3737 static void rx_complete(struct urb *urb)
3738 {
3739 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3740 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3741 	struct lan78xx_net *dev = entry->dev;
3742 	int urb_status = urb->status;
3743 	enum skb_state state;
3744 
3745 	netif_dbg(dev, rx_status, dev->net,
3746 		  "rx done: status %d", urb->status);
3747 
3748 	skb_put(skb, urb->actual_length);
3749 	state = rx_done;
3750 
3751 	if (urb != entry->urb)
3752 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
3753 
3754 	switch (urb_status) {
3755 	case 0:
3756 		if (skb->len < RX_SKB_MIN_LEN) {
3757 			state = rx_cleanup;
3758 			dev->net->stats.rx_errors++;
3759 			dev->net->stats.rx_length_errors++;
3760 			netif_dbg(dev, rx_err, dev->net,
3761 				  "rx length %d\n", skb->len);
3762 		}
3763 		usb_mark_last_busy(dev->udev);
3764 		break;
3765 	case -EPIPE:
3766 		dev->net->stats.rx_errors++;
3767 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3768 		fallthrough;
3769 	case -ECONNRESET:				/* async unlink */
3770 	case -ESHUTDOWN:				/* hardware gone */
3771 		netif_dbg(dev, ifdown, dev->net,
3772 			  "rx shutdown, code %d\n", urb_status);
3773 		state = rx_cleanup;
3774 		break;
3775 	case -EPROTO:
3776 	case -ETIME:
3777 	case -EILSEQ:
3778 		dev->net->stats.rx_errors++;
3779 		state = rx_cleanup;
3780 		break;
3781 
3782 	/* data overrun ... flush fifo? */
3783 	case -EOVERFLOW:
3784 		dev->net->stats.rx_over_errors++;
3785 		fallthrough;
3786 
3787 	default:
3788 		state = rx_cleanup;
3789 		dev->net->stats.rx_errors++;
3790 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3791 		break;
3792 	}
3793 
3794 	state = defer_bh(dev, skb, &dev->rxq, state);
3795 }
3796 
3797 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
3798 {
3799 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3800 	size_t size = dev->rx_urb_size;
3801 	struct urb *urb = entry->urb;
3802 	unsigned long lockflags;
3803 	int ret = 0;
3804 
3805 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3806 			  skb->data, size, rx_complete, skb);
3807 
3808 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3809 
3810 	if (netif_device_present(dev->net) &&
3811 	    netif_running(dev->net) &&
3812 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3813 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3814 		ret = usb_submit_urb(urb, flags);
3815 		switch (ret) {
3816 		case 0:
3817 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3818 			break;
3819 		case -EPIPE:
3820 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3821 			break;
3822 		case -ENODEV:
3823 		case -ENOENT:
3824 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3825 			netif_device_detach(dev->net);
3826 			break;
3827 		case -EHOSTUNREACH:
3828 			ret = -ENOLINK;
3829 			napi_schedule(&dev->napi);
3830 			break;
3831 		default:
3832 			netif_dbg(dev, rx_err, dev->net,
3833 				  "rx submit, %d\n", ret);
3834 			napi_schedule(&dev->napi);
3835 			break;
3836 		}
3837 	} else {
3838 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3839 		ret = -ENOLINK;
3840 	}
3841 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3842 
3843 	if (ret)
3844 		lan78xx_release_rx_buf(dev, skb);
3845 
3846 	return ret;
3847 }
3848 
3849 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3850 {
3851 	struct sk_buff *rx_buf;
3852 
3853 	/* Ensure the maximum number of Rx URBs is submitted
3854 	 */
3855 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3856 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3857 			break;
3858 	}
3859 }
3860 
3861 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
3862 				    struct sk_buff *rx_buf)
3863 {
3864 	/* reset SKB data pointers */
3865 
3866 	rx_buf->data = rx_buf->head;
3867 	skb_reset_tail_pointer(rx_buf);
3868 	rx_buf->len = 0;
3869 	rx_buf->data_len = 0;
3870 
3871 	rx_submit(dev, rx_buf, GFP_ATOMIC);
3872 }
3873 
3874 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
3875 {
3876 	u32 tx_cmd_a;
3877 	u32 tx_cmd_b;
3878 
3879 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3880 
3881 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3882 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3883 
3884 	tx_cmd_b = 0;
3885 	if (skb_is_gso(skb)) {
3886 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3887 
3888 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3889 
3890 		tx_cmd_a |= TX_CMD_A_LSO_;
3891 	}
3892 
3893 	if (skb_vlan_tag_present(skb)) {
3894 		tx_cmd_a |= TX_CMD_A_IVTG_;
3895 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3896 	}
3897 
3898 	put_unaligned_le32(tx_cmd_a, buffer);
3899 	put_unaligned_le32(tx_cmd_b, buffer + 4);
3900 }
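
/* Worked example: a 1514-byte frame with CHECKSUM_PARTIAL, no VLAN tag
 * and no GSO yields tx_cmd_a = 1514 | TX_CMD_A_FCS_ | TX_CMD_A_IPE_ |
 * TX_CMD_A_TPE_ and tx_cmd_b = 0, written little-endian immediately
 * ahead of the frame data in the URB buffer.
 */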
3901 
3902 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
3903 					    struct sk_buff *tx_buf)
3904 {
3905 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
3906 	int remain = dev->tx_urb_size;
3907 	u8 *tx_data = tx_buf->data;
3908 	u32 urb_len = 0;
3909 
3910 	entry->num_of_packet = 0;
3911 	entry->length = 0;
3912 
	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there is room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
3918 	while (remain >= TX_SKB_MIN_LEN) {
3919 		unsigned int pending_bytes;
3920 		unsigned int align_bytes;
3921 		struct sk_buff *skb;
3922 		unsigned int len;
3923 
3924 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
3925 
3926 		if (!skb)
3927 			break;
3928 
3929 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
3930 			      TX_ALIGNMENT;
3931 		len = align_bytes + TX_CMD_LEN + skb->len;
3932 		if (len > remain) {
3933 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
3934 			break;
3935 		}
3936 
3937 		tx_data += align_bytes;
3938 
3939 		lan78xx_fill_tx_cmd_words(skb, tx_data);
3940 		tx_data += TX_CMD_LEN;
3941 
3942 		len = skb->len;
3943 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
3944 			struct net_device_stats *stats = &dev->net->stats;
3945 
3946 			stats->tx_dropped++;
3947 			dev_kfree_skb_any(skb);
3948 			tx_data -= TX_CMD_LEN;
3949 			continue;
3950 		}
3951 
3952 		tx_data += len;
3953 		entry->length += len;
3954 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
3955 
3956 		dev_kfree_skb_any(skb);
3957 
3958 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
3959 
3960 		remain = dev->tx_urb_size - urb_len;
3961 	}
3962 
3963 	skb_put(tx_buf, urb_len);
3964 
3965 	return entry;
3966 }
3967 
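/* Tx bottom half, run from NAPI context: wake the stack queue if URB
 * space has become available, then keep filling and submitting Tx URBs
 * until there is no more pending data, no free URB, or a submission
 * fails.
 */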
3968 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3969 {
3970 	int ret;
3971 
	/* Start the stack Tx queue if it was stopped */
3974 	netif_tx_lock(dev->net);
3975 	if (netif_queue_stopped(dev->net)) {
3976 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
3977 			netif_wake_queue(dev->net);
3978 	}
3979 	netif_tx_unlock(dev->net);
3980 
3981 	/* Go through the Tx pending queue and set up URBs to transfer
3982 	 * the data to the device. Stop if no more pending data or URBs,
3983 	 * or if an error occurs when a URB is submitted.
3984 	 */
3985 	do {
3986 		struct skb_data *entry;
3987 		struct sk_buff *tx_buf;
3988 		unsigned long flags;
3989 
3990 		if (skb_queue_empty(&dev->txq_pend))
3991 			break;
3992 
3993 		tx_buf = lan78xx_get_tx_buf(dev);
3994 		if (!tx_buf)
3995 			break;
3996 
3997 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
3998 
3999 		spin_lock_irqsave(&dev->txq.lock, flags);
4000 		ret = usb_autopm_get_interface_async(dev->intf);
4001 		if (ret < 0) {
4002 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4003 			goto out;
4004 		}
4005 
4006 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
4007 				  tx_buf->data, tx_buf->len, tx_complete,
4008 				  tx_buf);
4009 
4010 		if (tx_buf->len % dev->maxpacket == 0) {
			/* terminate with a zero-length packet (URB_ZERO_PACKET) */
4012 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
4013 		}
4014 
4015 #ifdef CONFIG_PM
4016 		/* if device is asleep stop outgoing packet processing */
4017 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4018 			usb_anchor_urb(entry->urb, &dev->deferred);
4019 			netif_stop_queue(dev->net);
4020 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4021 			netdev_dbg(dev->net,
4022 				   "Delaying transmission for resumption\n");
4023 			return;
4024 		}
4025 #endif
4026 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
4027 		switch (ret) {
4028 		case 0:
4029 			netif_trans_update(dev->net);
4030 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
4031 			break;
4032 		case -EPIPE:
4033 			netif_stop_queue(dev->net);
4034 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4035 			usb_autopm_put_interface_async(dev->intf);
4036 			break;
4037 		case -ENODEV:
4038 		case -ENOENT:
4039 			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)\n", ret);
4041 			netif_device_detach(dev->net);
4042 			break;
4043 		default:
4044 			usb_autopm_put_interface_async(dev->intf);
4045 			netif_dbg(dev, tx_err, dev->net,
4046 				  "tx submit urb err %d\n", ret);
4047 			break;
4048 		}
4049 
4050 		spin_unlock_irqrestore(&dev->txq.lock, flags);
4051 
4052 		if (ret) {
			netdev_warn(dev->net, "failed to submit tx urb, %d\n", ret);
4054 out:
4055 			dev->net->stats.tx_dropped += entry->num_of_packet;
4056 			lan78xx_release_tx_buf(dev, tx_buf);
4057 		}
4058 	} while (ret == 0);
4059 }
4060 
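/* NAPI bottom half. Delivers frames left over from the previous cycle,
 * snapshots the Rx done queue, processes each completed URB (passing
 * its frames up the stack) and resubmits it, then refills the free Rx
 * URBs and runs the Tx bottom half. Returns the number of frames
 * passed to the stack.
 */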
4061 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
4062 {
4063 	struct sk_buff_head done;
4064 	struct sk_buff *rx_buf;
4065 	struct skb_data *entry;
4066 	unsigned long flags;
4067 	int work_done = 0;
4068 
4069 	/* Pass frames received in the last NAPI cycle before
4070 	 * working on newly completed URBs.
4071 	 */
4072 	while (!skb_queue_empty(&dev->rxq_overflow)) {
4073 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
4074 		++work_done;
4075 	}
4076 
4077 	/* Take a snapshot of the done queue and move items to a
4078 	 * temporary queue. Rx URB completions will continue to add
4079 	 * to the done queue.
4080 	 */
4081 	__skb_queue_head_init(&done);
4082 
4083 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4084 	skb_queue_splice_init(&dev->rxq_done, &done);
4085 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4086 
4087 	/* Extract receive frames from completed URBs and
4088 	 * pass them to the stack. Re-submit each completed URB.
4089 	 */
4090 	while ((work_done < budget) &&
4091 	       (rx_buf = __skb_dequeue(&done))) {
4092 		entry = (struct skb_data *)(rx_buf->cb);
4093 		switch (entry->state) {
4094 		case rx_done:
4095 			rx_process(dev, rx_buf, budget, &work_done);
4096 			break;
4097 		case rx_cleanup:
4098 			break;
4099 		default:
4100 			netdev_dbg(dev->net, "rx buf state %d\n",
4101 				   entry->state);
4102 			break;
4103 		}
4104 
4105 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4106 	}
4107 
4108 	/* If budget was consumed before processing all the URBs put them
4109 	 * back on the front of the done queue. They will be first to be
4110 	 * processed in the next NAPI cycle.
4111 	 */
4112 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4113 	skb_queue_splice(&done, &dev->rxq_done);
4114 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4115 
4116 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4117 		/* reset update timer delta */
4118 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4119 			dev->delta = 1;
4120 			mod_timer(&dev->stat_monitor,
4121 				  jiffies + STAT_UPDATE_TIMER);
4122 		}
4123 
4124 		/* Submit all free Rx URBs */
4125 
4126 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4127 			lan78xx_rx_urb_submit_all(dev);
4128 
4129 		/* Submit new Tx URBs */
4130 
4131 		lan78xx_tx_bh(dev);
4132 	}
4133 
4134 	return work_done;
4135 }
4136 
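/* NAPI poll callback. Returning the full budget keeps polling; when
 * less work was done the poll is completed and only re-armed if Rx
 * completions are already queued, pending Tx data is waiting on an
 * idle Tx URB queue, or a stopped stack queue had to be woken.
 */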
4137 static int lan78xx_poll(struct napi_struct *napi, int budget)
4138 {
4139 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4140 	int result = budget;
4141 	int work_done;
4142 
4143 	/* Don't do any work if the device is suspended */
4144 
4145 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4146 		napi_complete_done(napi, 0);
4147 		return 0;
4148 	}
4149 
4150 	/* Process completed URBs and submit new URBs */
4151 
4152 	work_done = lan78xx_bh(dev, budget);
4153 
4154 	if (work_done < budget) {
4155 		napi_complete_done(napi, work_done);
4156 
4157 		/* Start a new polling cycle if data was received or
4158 		 * data is waiting to be transmitted.
4159 		 */
4160 		if (!skb_queue_empty(&dev->rxq_done)) {
4161 			napi_schedule(napi);
4162 		} else if (netif_carrier_ok(dev->net)) {
4163 			if (skb_queue_empty(&dev->txq) &&
4164 			    !skb_queue_empty(&dev->txq_pend)) {
4165 				napi_schedule(napi);
4166 			} else {
4167 				netif_tx_lock(dev->net);
4168 				if (netif_queue_stopped(dev->net)) {
4169 					netif_wake_queue(dev->net);
4170 					napi_schedule(napi);
4171 				}
4172 				netif_tx_unlock(dev->net);
4173 			}
4174 		}
4175 		result = work_done;
4176 	}
4177 
4178 	return result;
4179 }
4180 
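/* Deferred event worker: clears stalled bulk endpoints (EVENT_TX_HALT /
 * EVENT_RX_HALT) with usb_clear_halt(), performs a deferred link reset,
 * and refreshes statistics while backing the stat timer off
 * exponentially (delta doubles, capped at a factor of 50).
 */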
4181 static void lan78xx_delayedwork(struct work_struct *work)
4182 {
4183 	int status;
4184 	struct lan78xx_net *dev;
4185 
4186 	dev = container_of(work, struct lan78xx_net, wq.work);
4187 
4188 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4189 		return;
4190 
4191 	if (usb_autopm_get_interface(dev->intf) < 0)
4192 		return;
4193 
4194 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4195 		unlink_urbs(dev, &dev->txq);
4196 
4197 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4198 		if (status < 0 &&
4199 		    status != -EPIPE &&
4200 		    status != -ESHUTDOWN) {
4201 			if (netif_msg_tx_err(dev))
4202 				netdev_err(dev->net,
4203 					   "can't clear tx halt, status %d\n",
4204 					   status);
4205 		} else {
4206 			clear_bit(EVENT_TX_HALT, &dev->flags);
4207 			if (status != -ESHUTDOWN)
4208 				netif_wake_queue(dev->net);
4209 		}
4210 	}
4211 
4212 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4213 		unlink_urbs(dev, &dev->rxq);
4214 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4215 		if (status < 0 &&
4216 		    status != -EPIPE &&
4217 		    status != -ESHUTDOWN) {
4218 			if (netif_msg_rx_err(dev))
4219 				netdev_err(dev->net,
4220 					   "can't clear rx halt, status %d\n",
4221 					   status);
4222 		} else {
4223 			clear_bit(EVENT_RX_HALT, &dev->flags);
4224 			napi_schedule(&dev->napi);
4225 		}
4226 	}
4227 
4228 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		ret = lan78xx_link_reset(dev);
		if (ret < 0)
			netdev_info(dev->net, "link reset failed (%d)\n", ret);
4236 	}
4237 
4238 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4239 		lan78xx_update_stats(dev);
4240 
4241 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4242 
4243 		mod_timer(&dev->stat_monitor,
4244 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4245 
4246 		dev->delta = min((dev->delta * 2), 50);
4247 	}
4248 
4249 	usb_autopm_put_interface(dev->intf);
4250 }
4251 
4252 static void intr_complete(struct urb *urb)
4253 {
4254 	struct lan78xx_net *dev = urb->context;
4255 	int status = urb->status;
4256 
4257 	switch (status) {
4258 	/* success */
4259 	case 0:
4260 		lan78xx_status(dev, urb);
4261 		break;
4262 
4263 	/* software-driven interface shutdown */
4264 	case -ENOENT:			/* urb killed */
4265 	case -ENODEV:			/* hardware gone */
4266 	case -ESHUTDOWN:		/* hardware gone */
4267 		netif_dbg(dev, ifdown, dev->net,
4268 			  "intr shutdown, code %d\n", status);
4269 		return;
4270 
4271 	/* NOTE:  not throttling like RX/TX, since this endpoint
4272 	 * already polls infrequently
4273 	 */
4274 	default:
4275 		netdev_dbg(dev->net, "intr status %d\n", status);
4276 		break;
4277 	}
4278 
4279 	if (!netif_device_present(dev->net) ||
4280 	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB\n");
4282 		return;
4283 	}
4284 
4285 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4286 	status = usb_submit_urb(urb, GFP_ATOMIC);
4287 
4288 	switch (status) {
4289 	case  0:
4290 		break;
4291 	case -ENODEV:
4292 	case -ENOENT:
4293 		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)\n", status);
4295 		netif_device_detach(dev->net);
4296 		break;
4297 	default:
4298 		netif_err(dev, timer, dev->net,
4299 			  "intr resubmit --> %d\n", status);
4300 		break;
4301 	}
4302 }
4303 
4304 static void lan78xx_disconnect(struct usb_interface *intf)
4305 {
4306 	struct lan78xx_net *dev;
4307 	struct usb_device *udev;
4308 	struct net_device *net;
4309 	struct phy_device *phydev;
4310 
4311 	dev = usb_get_intfdata(intf);
4312 	usb_set_intfdata(intf, NULL);
4313 	if (!dev)
4314 		return;
4315 
4316 	netif_napi_del(&dev->napi);
4317 
4318 	udev = interface_to_usbdev(intf);
4319 	net = dev->net;
4320 
4321 	unregister_netdev(net);
4322 
4323 	timer_shutdown_sync(&dev->stat_monitor);
4324 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4325 	cancel_delayed_work_sync(&dev->wq);
4326 
4327 	phydev = net->phydev;
4328 
4329 	phy_disconnect(net->phydev);
4330 
4331 	if (phy_is_pseudo_fixed_link(phydev)) {
4332 		fixed_phy_unregister(phydev);
4333 		phy_device_free(phydev);
4334 	}
4335 
4336 	usb_scuttle_anchored_urbs(&dev->deferred);
4337 
4338 	lan78xx_unbind(dev, intf);
4339 
4340 	lan78xx_free_tx_resources(dev);
4341 	lan78xx_free_rx_resources(dev);
4342 
4343 	usb_kill_urb(dev->urb_intr);
4344 	usb_free_urb(dev->urb_intr);
4345 
4346 	free_netdev(net);
4347 	usb_put_dev(udev);
4348 }
4349 
4350 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4351 {
4352 	struct lan78xx_net *dev = netdev_priv(net);
4353 
4354 	unlink_urbs(dev, &dev->txq);
4355 	napi_schedule(&dev->napi);
4356 }
4357 
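/* Frames larger than the device's TSO limit cannot be segmented in
 * hardware, so strip the GSO feature flags and let the stack fall back
 * to software segmentation; VLAN and VXLAN constraints are delegated
 * to the generic helpers.
 */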
4358 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4359 						struct net_device *netdev,
4360 						netdev_features_t features)
4361 {
4362 	struct lan78xx_net *dev = netdev_priv(netdev);
4363 
4364 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4365 		features &= ~NETIF_F_GSO_MASK;
4366 
4367 	features = vlan_features_check(skb, features);
4368 	features = vxlan_features_check(skb, features);
4369 
4370 	return features;
4371 }
4372 
4373 static const struct net_device_ops lan78xx_netdev_ops = {
4374 	.ndo_open		= lan78xx_open,
4375 	.ndo_stop		= lan78xx_stop,
4376 	.ndo_start_xmit		= lan78xx_start_xmit,
4377 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4378 	.ndo_change_mtu		= lan78xx_change_mtu,
4379 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4380 	.ndo_validate_addr	= eth_validate_addr,
4381 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4382 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4383 	.ndo_set_features	= lan78xx_set_features,
4384 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4385 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4386 	.ndo_features_check	= lan78xx_features_check,
4387 };
4388 
4389 static void lan78xx_stat_monitor(struct timer_list *t)
4390 {
4391 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4392 
4393 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4394 }
4395 
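/* Probe: validates that the interface provides bulk-in, bulk-out and
 * interrupt-in endpoints, allocates the netdev and the Tx/Rx URB pools,
 * binds the hardware, sets up the interrupt URB and the PHY, then
 * registers the network device and configures the runtime-PM
 * autosuspend delay.
 */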
4396 static int lan78xx_probe(struct usb_interface *intf,
4397 			 const struct usb_device_id *id)
4398 {
4399 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4400 	struct lan78xx_net *dev;
4401 	struct net_device *netdev;
4402 	struct usb_device *udev;
4403 	int ret;
4404 	unsigned int maxp;
4405 	unsigned int period;
4406 	u8 *buf = NULL;
4407 
4408 	udev = interface_to_usbdev(intf);
4409 	udev = usb_get_dev(udev);
4410 
4411 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4412 	if (!netdev) {
4413 		dev_err(&intf->dev, "Error: OOM\n");
4414 		ret = -ENOMEM;
4415 		goto out1;
4416 	}
4417 
4418 	/* netdev_printk() needs this */
4419 	SET_NETDEV_DEV(netdev, &intf->dev);
4420 
4421 	dev = netdev_priv(netdev);
4422 	dev->udev = udev;
4423 	dev->intf = intf;
4424 	dev->net = netdev;
4425 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4426 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4427 
4428 	skb_queue_head_init(&dev->rxq);
4429 	skb_queue_head_init(&dev->txq);
4430 	skb_queue_head_init(&dev->rxq_done);
4431 	skb_queue_head_init(&dev->txq_pend);
4432 	skb_queue_head_init(&dev->rxq_overflow);
4433 	mutex_init(&dev->phy_mutex);
4434 	mutex_init(&dev->dev_mutex);
4435 
4436 	ret = lan78xx_urb_config_init(dev);
4437 	if (ret < 0)
4438 		goto out2;
4439 
4440 	ret = lan78xx_alloc_tx_resources(dev);
4441 	if (ret < 0)
4442 		goto out2;
4443 
4444 	ret = lan78xx_alloc_rx_resources(dev);
4445 	if (ret < 0)
4446 		goto out3;
4447 
4448 	/* MTU range: 68 - 9000 */
4449 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4450 
4451 	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4452 
4453 	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4454 
4455 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4456 	init_usb_anchor(&dev->deferred);
4457 
4458 	netdev->netdev_ops = &lan78xx_netdev_ops;
4459 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4460 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4461 
4462 	dev->delta = 1;
4463 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4464 
4465 	mutex_init(&dev->stats.access_lock);
4466 
4467 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4468 		ret = -ENODEV;
4469 		goto out4;
4470 	}
4471 
4472 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4473 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4474 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4475 		ret = -ENODEV;
4476 		goto out4;
4477 	}
4478 
4479 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4480 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4481 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4482 		ret = -ENODEV;
4483 		goto out4;
4484 	}
4485 
4486 	ep_intr = &intf->cur_altsetting->endpoint[2];
4487 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4488 		ret = -ENODEV;
4489 		goto out4;
4490 	}
4491 
4492 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4493 					usb_endpoint_num(&ep_intr->desc));
4494 
4495 	ret = lan78xx_bind(dev, intf);
4496 	if (ret < 0)
4497 		goto out4;
4498 
4499 	period = ep_intr->desc.bInterval;
4500 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4501 
4502 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4503 	if (!dev->urb_intr) {
4504 		ret = -ENOMEM;
4505 		goto out5;
4506 	}
4507 
4508 	buf = kmalloc(maxp, GFP_KERNEL);
4509 	if (!buf) {
4510 		ret = -ENOMEM;
4511 		goto free_urbs;
4512 	}
4513 
4514 	usb_fill_int_urb(dev->urb_intr, dev->udev,
4515 			 dev->pipe_intr, buf, maxp,
4516 			 intr_complete, dev, period);
4517 	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4518 
4519 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4520 
4521 	/* Reject broken descriptors. */
4522 	if (dev->maxpacket == 0) {
4523 		ret = -ENODEV;
4524 		goto free_urbs;
4525 	}
4526 
4527 	/* driver requires remote-wakeup capability during autosuspend. */
4528 	intf->needs_remote_wakeup = 1;
4529 
4530 	ret = lan78xx_phy_init(dev);
4531 	if (ret < 0)
4532 		goto free_urbs;
4533 
4534 	ret = register_netdev(netdev);
4535 	if (ret != 0) {
4536 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4537 		goto out8;
4538 	}
4539 
4540 	usb_set_intfdata(intf, dev);
4541 
4542 	ret = device_set_wakeup_enable(&udev->dev, true);
4543 
	/* The 2 second default autosuspend delay has more overhead than
	 * advantage; raise it to 10 seconds.
	 */
4547 	pm_runtime_set_autosuspend_delay(&udev->dev,
4548 					 DEFAULT_AUTOSUSPEND_DELAY);
4549 
4550 	return 0;
4551 
4552 out8:
4553 	phy_disconnect(netdev->phydev);
4554 free_urbs:
4555 	usb_free_urb(dev->urb_intr);
4556 out5:
4557 	lan78xx_unbind(dev, intf);
4558 out4:
4559 	netif_napi_del(&dev->napi);
4560 	lan78xx_free_rx_resources(dev);
4561 out3:
4562 	lan78xx_free_tx_resources(dev);
4563 out2:
4564 	free_netdev(netdev);
4565 out1:
4566 	usb_put_dev(udev);
4567 
4568 	return ret;
4569 }
4570 
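/* Bit-serial CRC-16 (polynomial 0x8005) over a wake-up frame byte
 * pattern; the result is programmed into the WUF_CFGx CRC field by
 * lan78xx_set_suspend() for the wake-up frame filters.
 */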
4571 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4572 {
4573 	const u16 crc16poly = 0x8005;
4574 	int i;
4575 	u16 bit, crc, msb;
4576 	u8 data;
4577 
4578 	crc = 0xFFFF;
4579 	for (i = 0; i < len; i++) {
4580 		data = *buf++;
4581 		for (bit = 0; bit < 8; bit++) {
4582 			msb = crc >> 15;
4583 			crc <<= 1;
4584 
4585 			if (msb ^ (u16)(data & 1)) {
4586 				crc ^= crc16poly;
4587 				crc |= (u16)0x0001U;
4588 			}
4589 			data >>= 1;
4590 		}
4591 	}
4592 
4593 	return crc;
4594 }
4595 
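/* Configure wake sources for USB selective suspend: with the Tx and Rx
 * paths stopped, clear stale wake status, enable good-frame (receive
 * filter) wake with packet storage, PHY wake and WoL in PMT_CTL suspend
 * mode 3, then write back the WUPS bits to clear any latched wake
 * status and restart the Rx path so a matching frame can wake the
 * device.
 */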
4596 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4597 {
4598 	u32 buf;
4599 	int ret;
4600 
4601 	ret = lan78xx_stop_tx_path(dev);
4602 	if (ret < 0)
4603 		return ret;
4604 
4605 	ret = lan78xx_stop_rx_path(dev);
4606 	if (ret < 0)
4607 		return ret;
4608 
4609 	/* auto suspend (selective suspend) */
4610 
4611 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4612 	if (ret < 0)
4613 		return ret;
4614 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4615 	if (ret < 0)
4616 		return ret;
4617 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4618 	if (ret < 0)
4619 		return ret;
4620 
4621 	/* set goodframe wakeup */
4622 
4623 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4624 	if (ret < 0)
4625 		return ret;
4626 
4627 	buf |= WUCSR_RFE_WAKE_EN_;
4628 	buf |= WUCSR_STORE_WAKE_;
4629 
4630 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4631 	if (ret < 0)
4632 		return ret;
4633 
4634 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4635 	if (ret < 0)
4636 		return ret;
4637 
4638 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4639 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4640 	buf |= PMT_CTL_PHY_WAKE_EN_;
4641 	buf |= PMT_CTL_WOL_EN_;
4642 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4643 	buf |= PMT_CTL_SUS_MODE_3_;
4644 
4645 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4646 	if (ret < 0)
4647 		return ret;
4648 
4649 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4650 	if (ret < 0)
4651 		return ret;
4652 
4653 	buf |= PMT_CTL_WUPS_MASK_;
4654 
4655 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4656 	if (ret < 0)
4657 		return ret;
4658 
4659 	ret = lan78xx_start_rx_path(dev);
4660 
4661 	return ret;
4662 }
4663 
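/* Program the wake-on-LAN configuration for system suspend. Each WAKE_*
 * flag maps to WUCSR enable bits and/or a wake-up frame filter:
 * multicast wake matches the IPv4 (01:00:5e) and IPv6 (33:33) group
 * address prefixes via byte masks 0x7 and 0x3, and ARP wake matches the
 * EtherType at offsets 12-13 via mask 0x3000, each filter storing the
 * CRC-16 of the selected bytes in WUF_CFGx. The PMT_CTL suspend mode is
 * then chosen to match the requested wake sources.
 */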
4664 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4665 {
4666 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4667 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4668 	const u8 arp_type[2] = { 0x08, 0x06 };
4669 	u32 temp_pmt_ctl;
4670 	int mask_index;
4671 	u32 temp_wucsr;
4672 	u32 buf;
4673 	u16 crc;
4674 	int ret;
4675 
4676 	ret = lan78xx_stop_tx_path(dev);
4677 	if (ret < 0)
4678 		return ret;
4679 	ret = lan78xx_stop_rx_path(dev);
4680 	if (ret < 0)
4681 		return ret;
4682 
4683 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4684 	if (ret < 0)
4685 		return ret;
4686 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4687 	if (ret < 0)
4688 		return ret;
4689 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4690 	if (ret < 0)
4691 		return ret;
4692 
4693 	temp_wucsr = 0;
4694 
4695 	temp_pmt_ctl = 0;
4696 
4697 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4698 	if (ret < 0)
4699 		return ret;
4700 
4701 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4702 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4703 
4704 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4705 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4706 		if (ret < 0)
4707 			return ret;
4708 	}
4709 
4710 	mask_index = 0;
4711 	if (wol & WAKE_PHY) {
4712 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4713 
4714 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4715 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4716 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4717 	}
4718 	if (wol & WAKE_MAGIC) {
4719 		temp_wucsr |= WUCSR_MPEN_;
4720 
4721 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4722 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4723 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4724 	}
4725 	if (wol & WAKE_BCAST) {
4726 		temp_wucsr |= WUCSR_BCST_EN_;
4727 
4728 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4729 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4730 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4731 	}
4732 	if (wol & WAKE_MCAST) {
4733 		temp_wucsr |= WUCSR_WAKE_EN_;
4734 
4735 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4736 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4737 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4738 					WUF_CFGX_EN_ |
4739 					WUF_CFGX_TYPE_MCAST_ |
4740 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4741 					(crc & WUF_CFGX_CRC16_MASK_));
4742 		if (ret < 0)
4743 			return ret;
4744 
4745 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4746 		if (ret < 0)
4747 			return ret;
4748 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4749 		if (ret < 0)
4750 			return ret;
4751 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4752 		if (ret < 0)
4753 			return ret;
4754 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4755 		if (ret < 0)
4756 			return ret;
4757 
4758 		mask_index++;
4759 
4760 		/* for IPv6 Multicast */
4761 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4762 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4763 					WUF_CFGX_EN_ |
4764 					WUF_CFGX_TYPE_MCAST_ |
4765 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4766 					(crc & WUF_CFGX_CRC16_MASK_));
4767 		if (ret < 0)
4768 			return ret;
4769 
4770 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4771 		if (ret < 0)
4772 			return ret;
4773 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4774 		if (ret < 0)
4775 			return ret;
4776 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4777 		if (ret < 0)
4778 			return ret;
4779 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4780 		if (ret < 0)
4781 			return ret;
4782 
4783 		mask_index++;
4784 
4785 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4786 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4787 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4788 	}
4789 	if (wol & WAKE_UCAST) {
4790 		temp_wucsr |= WUCSR_PFDA_EN_;
4791 
4792 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4793 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4794 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4795 	}
4796 	if (wol & WAKE_ARP) {
4797 		temp_wucsr |= WUCSR_WAKE_EN_;
4798 
4799 		/* set WUF_CFG & WUF_MASK
4800 		 * for packettype (offset 12,13) = ARP (0x0806)
4801 		 */
4802 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
4803 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4804 					WUF_CFGX_EN_ |
4805 					WUF_CFGX_TYPE_ALL_ |
4806 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4807 					(crc & WUF_CFGX_CRC16_MASK_));
4808 		if (ret < 0)
4809 			return ret;
4810 
4811 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4812 		if (ret < 0)
4813 			return ret;
4814 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4815 		if (ret < 0)
4816 			return ret;
4817 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4818 		if (ret < 0)
4819 			return ret;
4820 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4821 		if (ret < 0)
4822 			return ret;
4823 
4824 		mask_index++;
4825 
4826 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4827 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4828 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4829 	}
4830 
4831 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4832 	if (ret < 0)
4833 		return ret;
4834 
4835 	/* when multiple WOL bits are set */
4836 	if (hweight_long((unsigned long)wol) > 1) {
4837 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4838 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4839 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4840 	}
4841 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4842 	if (ret < 0)
4843 		return ret;
4844 
4845 	/* clear WUPS */
4846 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4847 	if (ret < 0)
4848 		return ret;
4849 
4850 	buf |= PMT_CTL_WUPS_MASK_;
4851 
4852 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4853 	if (ret < 0)
4854 		return ret;
4855 
4856 	ret = lan78xx_start_rx_path(dev);
4857 
4858 	return ret;
4859 }
4860 
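/* USB suspend handler. Autosuspend is refused (-EBUSY) while Tx data is
 * queued; otherwise the Tx/Rx paths are stopped, in-flight URBs are
 * killed, and the device is armed either for selective suspend
 * (runtime PM) or for the user-configured wake-on-LAN sources.
 */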
4861 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4862 {
4863 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4864 	bool dev_open;
4865 	int ret;
4866 
4867 	mutex_lock(&dev->dev_mutex);
4868 
4869 	netif_dbg(dev, ifdown, dev->net,
4870 		  "suspending: pm event %#x", message.event);
4871 
4872 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4873 
4874 	if (dev_open) {
4875 		spin_lock_irq(&dev->txq.lock);
4876 		/* don't autosuspend while transmitting */
4877 		if ((skb_queue_len(&dev->txq) ||
4878 		     skb_queue_len(&dev->txq_pend)) &&
4879 		    PMSG_IS_AUTO(message)) {
4880 			spin_unlock_irq(&dev->txq.lock);
4881 			ret = -EBUSY;
4882 			goto out;
4883 		} else {
4884 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4885 			spin_unlock_irq(&dev->txq.lock);
4886 		}
4887 
4888 		/* stop RX */
4889 		ret = lan78xx_stop_rx_path(dev);
4890 		if (ret < 0)
4891 			goto out;
4892 
4893 		ret = lan78xx_flush_rx_fifo(dev);
4894 		if (ret < 0)
4895 			goto out;
4896 
4897 		/* stop Tx */
4898 		ret = lan78xx_stop_tx_path(dev);
4899 		if (ret < 0)
4900 			goto out;
4901 
4902 		/* empty out the Rx and Tx queues */
4903 		netif_device_detach(dev->net);
4904 		lan78xx_terminate_urbs(dev);
4905 		usb_kill_urb(dev->urb_intr);
4906 
4907 		/* reattach */
4908 		netif_device_attach(dev->net);
4909 
4910 		del_timer(&dev->stat_monitor);
4911 
4912 		if (PMSG_IS_AUTO(message)) {
4913 			ret = lan78xx_set_auto_suspend(dev);
4914 			if (ret < 0)
4915 				goto out;
4916 		} else {
4917 			struct lan78xx_priv *pdata;
4918 
4919 			pdata = (struct lan78xx_priv *)(dev->data[0]);
4920 			netif_carrier_off(dev->net);
4921 			ret = lan78xx_set_suspend(dev, pdata->wol);
4922 			if (ret < 0)
4923 				goto out;
4924 		}
4925 	} else {
4926 		/* Interface is down; don't allow WOL and PHY
4927 		 * events to wake up the host
4928 		 */
4929 		u32 buf;
4930 
4931 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4932 
4933 		ret = lan78xx_write_reg(dev, WUCSR, 0);
4934 		if (ret < 0)
4935 			goto out;
4936 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
4937 		if (ret < 0)
4938 			goto out;
4939 
4940 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4941 		if (ret < 0)
4942 			goto out;
4943 
4944 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4945 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
4946 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
4947 		buf |= PMT_CTL_SUS_MODE_3_;
4948 
4949 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4950 		if (ret < 0)
4951 			goto out;
4952 
4953 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4954 		if (ret < 0)
4955 			goto out;
4956 
4957 		buf |= PMT_CTL_WUPS_MASK_;
4958 
4959 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4960 		if (ret < 0)
4961 			goto out;
4962 	}
4963 
4964 	ret = 0;
4965 out:
4966 	mutex_unlock(&dev->dev_mutex);
4967 
4968 	return ret;
4969 }
4970 
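/* Resubmit Tx URBs that were anchored on dev->deferred while the device
 * was asleep. URBs are dropped instead if the device has gone away, the
 * carrier is down, or the Tx pipe has already stalled; returns whether
 * a stall (-EPIPE) was seen so the caller can schedule EVENT_TX_HALT.
 */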
4971 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4972 {
4973 	bool pipe_halted = false;
4974 	struct urb *urb;
4975 
4976 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4977 		struct sk_buff *skb = urb->context;
4978 		int ret;
4979 
4980 		if (!netif_device_present(dev->net) ||
4981 		    !netif_carrier_ok(dev->net) ||
4982 		    pipe_halted) {
4983 			lan78xx_release_tx_buf(dev, skb);
4984 			continue;
4985 		}
4986 
4987 		ret = usb_submit_urb(urb, GFP_ATOMIC);
4988 
4989 		if (ret == 0) {
4990 			netif_trans_update(dev->net);
4991 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4992 		} else {
4993 			if (ret == -EPIPE) {
4994 				netif_stop_queue(dev->net);
4995 				pipe_halted = true;
4996 			} else if (ret == -ENODEV) {
4997 				netif_device_detach(dev->net);
4998 			}
4999 
5000 			lan78xx_release_tx_buf(dev, skb);
5001 		}
5002 	}
5003 
5004 	return pipe_halted;
5005 }
5006 
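/* USB resume handler: restarts the interrupt URB, replays Tx URBs that
 * were deferred while asleep, restarts the Tx path and NAPI, and
 * finally clears the latched wake-source status bits.
 */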
5007 static int lan78xx_resume(struct usb_interface *intf)
5008 {
5009 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5010 	bool dev_open;
5011 	int ret;
5012 
5013 	mutex_lock(&dev->dev_mutex);
5014 
5015 	netif_dbg(dev, ifup, dev->net, "resuming device");
5016 
5017 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5018 
5019 	if (dev_open) {
5020 		bool pipe_halted = false;
5021 
5022 		ret = lan78xx_flush_tx_fifo(dev);
5023 		if (ret < 0)
5024 			goto out;
5025 
5026 		if (dev->urb_intr) {
5027 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
5028 
5029 			if (ret < 0) {
5030 				if (ret == -ENODEV)
5031 					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB\n");
5033 			}
5034 		}
5035 
5036 		spin_lock_irq(&dev->txq.lock);
5037 
5038 		if (netif_device_present(dev->net)) {
5039 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
5040 
5041 			if (pipe_halted)
5042 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5043 		}
5044 
5045 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5046 
5047 		spin_unlock_irq(&dev->txq.lock);
5048 
5049 		if (!pipe_halted &&
5050 		    netif_device_present(dev->net) &&
5051 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5052 			netif_start_queue(dev->net);
5053 
5054 		ret = lan78xx_start_tx_path(dev);
5055 		if (ret < 0)
5056 			goto out;
5057 
5058 		napi_schedule(&dev->napi);
5059 
5060 		if (!timer_pending(&dev->stat_monitor)) {
5061 			dev->delta = 1;
5062 			mod_timer(&dev->stat_monitor,
5063 				  jiffies + STAT_UPDATE_TIMER);
5064 		}
5065 
5066 	} else {
5067 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5068 	}
5069 
5070 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
5071 	if (ret < 0)
5072 		goto out;
5073 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5074 	if (ret < 0)
5075 		goto out;
5076 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5077 	if (ret < 0)
5078 		goto out;
5079 
5080 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5081 					     WUCSR2_ARP_RCD_ |
5082 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5083 					     WUCSR2_IPV4_TCPSYN_RCD_);
5084 	if (ret < 0)
5085 		goto out;
5086 
5087 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5088 					    WUCSR_EEE_RX_WAKE_ |
5089 					    WUCSR_PFDA_FR_ |
5090 					    WUCSR_RFE_WAKE_FR_ |
5091 					    WUCSR_WUFR_ |
5092 					    WUCSR_MPR_ |
5093 					    WUCSR_BCST_FR_);
5094 	if (ret < 0)
5095 		goto out;
5096 
5097 	ret = 0;
5098 out:
5099 	mutex_unlock(&dev->dev_mutex);
5100 
5101 	return ret;
5102 }
5103 
5104 static int lan78xx_reset_resume(struct usb_interface *intf)
5105 {
5106 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5107 	int ret;
5108 
5109 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5110 
5111 	ret = lan78xx_reset(dev);
5112 	if (ret < 0)
5113 		return ret;
5114 
5115 	phy_start(dev->net->phydev);
5116 
5117 	ret = lan78xx_resume(intf);
5118 
5119 	return ret;
5120 }
5121 
5122 static const struct usb_device_id products[] = {
5123 	{
5124 	/* LAN7800 USB Gigabit Ethernet Device */
5125 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5126 	},
5127 	{
5128 	/* LAN7850 USB Gigabit Ethernet Device */
5129 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5130 	},
5131 	{
5132 	/* LAN7801 USB Gigabit Ethernet Device */
5133 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5134 	},
5135 	{
5136 	/* ATM2-AF USB Gigabit Ethernet Device */
5137 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5138 	},
5139 	{},
5140 };
5141 MODULE_DEVICE_TABLE(usb, products);
5142 
5143 static struct usb_driver lan78xx_driver = {
5144 	.name			= DRIVER_NAME,
5145 	.id_table		= products,
5146 	.probe			= lan78xx_probe,
5147 	.disconnect		= lan78xx_disconnect,
5148 	.suspend		= lan78xx_suspend,
5149 	.resume			= lan78xx_resume,
5150 	.reset_resume		= lan78xx_reset_resume,
5151 	.supports_autosuspend	= 1,
5152 	.disable_hub_initiated_lpm = 1,
5153 };
5154 
5155 module_usb_driver(lan78xx_driver);
5156 
5157 MODULE_AUTHOR(DRIVER_AUTHOR);
5158 MODULE_DESCRIPTION(DRIVER_DESC);
5159 MODULE_LICENSE("GPL");
5160