// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))
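
/* Worked example (derived from the macros above): thresholds are expressed
 * in 512-byte units, rounded up. For SuperSpeed,
 * FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS)
 *   = ((9216 + 511) / 512) | (((4096 + 511) / 512) << 8)
 *   = 18 | (8 << 8) = 0x0812
 */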

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_ALIGNMENT			(4)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID		(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

#define TX_URB_NUM			10
#define TX_SS_URB_NUM			TX_URB_NUM
#define TX_HS_URB_NUM			TX_URB_NUM
#define TX_FS_URB_NUM			TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet */
#define TX_SS_URB_SIZE			(32 * 1024)
#define TX_HS_URB_SIZE			(16 * 1024)
#define TX_FS_URB_SIZE			(10 * 1024)

#define RX_SS_URB_NUM			30
#define RX_HS_URB_NUM			10
#define RX_FS_URB_NUM			10
#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
#define RX_FS_URB_SIZE			TX_FS_URB_SIZE

#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY		0x2000
#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY		0x2000
#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY		0x2000

#define TX_CMD_LEN			8
#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
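
/* For example, on SuperSpeed tx_urb_size is TX_SS_URB_SIZE (32 KiB), so
 * LAN78XX_TSO_SIZE() = 32768 - (TX_CMD_LEN + ETH_HLEN) = 32768 - 22 = 32746
 * bytes of TSO payload fit in a single URB.
 */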

#define RX_CMD_LEN			10
#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistics update interval (msec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (msec) */
#define HW_DISABLE_DELAY_MS		1

/* interrupt sources reported via the interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};
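
/* struct skb_data is overlaid on skb->cb, which is 48 bytes on mainline
 * kernels; keep this structure small enough to fit there.
 */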

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10
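
/* The EVENT_* values are bit numbers in dev->flags; they are set with
 * set_bit() (see lan78xx_defer_kevent()) and serviced from the deferred
 * work queue.
 */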

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		mdiobus_mutex; /* for MDIO bus access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	if (skb_queue_empty(buf_pool))
		return NULL;

	return skb_dequeue(buf_pool);
}

static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}

static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
{
	struct skb_data *entry;
	struct sk_buff *buf;

	while (!skb_queue_empty(buf_pool)) {
		buf = skb_dequeue(buf_pool);
		if (buf) {
			entry = (struct skb_data *)buf->cb;
			usb_free_urb(entry->urb);
			dev_kfree_skb_any(buf);
		}
	}
}

static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}

static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}

static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}

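/* Register access helpers. Note that usb_control_msg() transfer buffers
 * must be DMA-able, which is why a small kmalloc'd buffer is used here
 * instead of stack or caller-provided memory.
 */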
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	return lan78xx_write_reg(dev, reg, buf);
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stats, ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
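
/* A counter that reads lower than the last saved snapshot has wrapped;
 * record the wrap so lan78xx_update_stats() can rebuild monotonically
 * increasing 64-bit totals.
 */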

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

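	/* Rebuild the 64-bit totals: each recorded wrap of a hardware
	 * counter contributes (rollover_max + 1) on top of the current
	 * reading.
	 */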
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}

static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do  {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	return stopped ? 0 : -ETIMEDOUT;
}

static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}

static int lan78xx_start_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start tx path");

	/* Start the MAC transmitter */

	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
	if (ret < 0)
		return ret;

	/* Start the Tx FIFO */

	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop tx path");

	/* Stop the Tx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
	if (ret < 0)
		return ret;

	/* Stop the MAC transmitter */

	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}

static int lan78xx_start_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start rx path");

	/* Start the Rx FIFO */

	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
	if (ret < 0)
		return ret;

	/* Start the MAC receiver */

	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop rx path");

	/* Stop the MAC receiver */

	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
	if (ret < 0)
		return ret;

	/* Stop the Rx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}

/* Loop with timeout until the MII access completes. Must be called with
 * mdiobus_mutex held.
 */
static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (ret < 0)
			return ret;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -ETIMEDOUT;
}

static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}
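
/* Example: mii_access(phydev->mdio.addr, MII_BMSR, MII_READ) composes a
 * MII_ACC value that selects the PHY and register, requests a read and
 * sets the BUSY bit to start the transaction.
 */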

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -ETIMEDOUT;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* On some chips the EEPROM pins are muxed with the LED function;
	 * disable the LEDs while accessing the EEPROM and restore them
	 * afterwards.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Not a USB-specific error; try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	int ret;
	u8 sig;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if (ret < 0)
		return ret;

	if (sig != EEPROM_INDICATOR)
		return -ENODATA;

	return lan78xx_read_raw_eeprom(dev, offset, length, data);
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* On some chips the EEPROM pins are muxed with the LED function;
	 * disable the LEDs while accessing the EEPROM and restore them
	 * afterwards.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Not a USB-specific error; try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Not a USB-specific error; try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Not a USB-specific error; try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

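/* The first OTP byte is a signature selecting the image: OTP_INDICATOR_1
 * means the data starts at offset 0, OTP_INDICATOR_2 means the valid image
 * starts at offset 0x100.
 */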
static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return ret;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -ETIMEDOUT;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

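/* Perfect (MAC address) filter layout: pfilter_table[index][1] is written
 * to MAF_LO and holds addr[0]..addr[3] (addr[0] in the LSB);
 * pfilter_table[index][0] goes to MAF_HI with addr[4]/addr[5] plus the
 * MAF_HI_VALID_ and MAF_HI_TYPE_DST_ flags.
 */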
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
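
/* The device uses a 512-bit multicast hash table: the bit number is the
 * top 9 bits of the Ethernet CRC, stored as u32 words indexed as
 * mchash_table[bitnum / 32], bit (bitnum % 32); see lan78xx_set_multicast().
 */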

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i, ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
				     DP_SEL_VHF_VLAN_LEN,
				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
	if (ret < 0)
		goto multicast_write_done;

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
		if (ret < 0)
			goto multicast_write_done;
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

multicast_write_done:
	if (ret < 0)
		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
	return;
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds our own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);

static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		timer_delete(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		local_bh_disable();
		napi_schedule(&dev->napi);
		local_bh_enable();
	}

	return 0;
}

/* Some work can't be done from tasklet context, so it is deferred to
 * keventd.
 *
 * NOTE: there is an annoying asymmetry: schedule_work() fails if the work
 * is already queued, while tasklet_schedule() does not. We hope such
 * failures are rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq_safe(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
	else
		ret = -EINVAL;

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		goto exit;

	if (buf & MAC_CR_EEE_EN_) {
		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same
		 * microsecond unit
		 */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		if (ret < 0)
			goto exit;

		edata->tx_lpi_timer = buf;
	} else {
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_set_eee(net->phydev, edata);
	if (ret < 0)
		goto out;

	buf = (u32)edata->tx_lpi_timer;
	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
out:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1956 
1957 static u32 lan78xx_get_link(struct net_device *net)
1958 {
1959 	u32 link;
1960 
1961 	mutex_lock(&net->phydev->lock);
1962 	phy_read_status(net->phydev);
1963 	link = net->phydev->link;
1964 	mutex_unlock(&net->phydev->lock);
1965 
1966 	return link;
1967 }
1968 
1969 static void lan78xx_get_drvinfo(struct net_device *net,
1970 				struct ethtool_drvinfo *info)
1971 {
1972 	struct lan78xx_net *dev = netdev_priv(net);
1973 
1974 	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1975 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1976 }
1977 
1978 static u32 lan78xx_get_msglevel(struct net_device *net)
1979 {
1980 	struct lan78xx_net *dev = netdev_priv(net);
1981 
1982 	return dev->msg_enable;
1983 }
1984 
1985 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1986 {
1987 	struct lan78xx_net *dev = netdev_priv(net);
1988 
1989 	dev->msg_enable = level;
1990 }
1991 
1992 static int lan78xx_get_link_ksettings(struct net_device *net,
1993 				      struct ethtool_link_ksettings *cmd)
1994 {
1995 	struct lan78xx_net *dev = netdev_priv(net);
1996 	struct phy_device *phydev = net->phydev;
1997 	int ret;
1998 
1999 	ret = usb_autopm_get_interface(dev->intf);
2000 	if (ret < 0)
2001 		return ret;
2002 
2003 	phy_ethtool_ksettings_get(phydev, cmd);
2004 
2005 	usb_autopm_put_interface(dev->intf);
2006 
2007 	return ret;
2008 }
2009 
2010 static int lan78xx_set_link_ksettings(struct net_device *net,
2011 				      const struct ethtool_link_ksettings *cmd)
2012 {
2013 	struct lan78xx_net *dev = netdev_priv(net);
2014 	struct phy_device *phydev = net->phydev;
2015 	int ret = 0;
2016 	int temp;
2017 
2018 	ret = usb_autopm_get_interface(dev->intf);
2019 	if (ret < 0)
2020 		return ret;
2021 
2022 	/* change speed & duplex */
2023 	ret = phy_ethtool_ksettings_set(phydev, cmd);
2024 
2025 	if (!cmd->base.autoneg) {
		/* toggle loopback in BMCR to force the link down briefly so
		 * the newly forced speed/duplex settings take effect
		 */
2027 		temp = phy_read(phydev, MII_BMCR);
2028 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
2029 		mdelay(1);
2030 		phy_write(phydev, MII_BMCR, temp);
2031 	}
2032 
2033 	usb_autopm_put_interface(dev->intf);
2034 
2035 	return ret;
2036 }
2037 
2038 static void lan78xx_get_pause(struct net_device *net,
2039 			      struct ethtool_pauseparam *pause)
2040 {
2041 	struct lan78xx_net *dev = netdev_priv(net);
2042 	struct phy_device *phydev = net->phydev;
2043 	struct ethtool_link_ksettings ecmd;
2044 
2045 	phy_ethtool_ksettings_get(phydev, &ecmd);
2046 
2047 	pause->autoneg = dev->fc_autoneg;
2048 
2049 	if (dev->fc_request_control & FLOW_CTRL_TX)
2050 		pause->tx_pause = 1;
2051 
2052 	if (dev->fc_request_control & FLOW_CTRL_RX)
2053 		pause->rx_pause = 1;
2054 }
2055 
2056 static int lan78xx_set_pause(struct net_device *net,
2057 			     struct ethtool_pauseparam *pause)
2058 {
2059 	struct lan78xx_net *dev = netdev_priv(net);
2060 	struct phy_device *phydev = net->phydev;
2061 	struct ethtool_link_ksettings ecmd;
2062 	int ret;
2063 
2064 	phy_ethtool_ksettings_get(phydev, &ecmd);
2065 
2066 	if (pause->autoneg && !ecmd.base.autoneg) {
2067 		ret = -EINVAL;
2068 		goto exit;
2069 	}
2070 
2071 	dev->fc_request_control = 0;
2072 	if (pause->rx_pause)
2073 		dev->fc_request_control |= FLOW_CTRL_RX;
2074 
2075 	if (pause->tx_pause)
2076 		dev->fc_request_control |= FLOW_CTRL_TX;
2077 
2078 	if (ecmd.base.autoneg) {
2079 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2080 		u32 mii_adv;
2081 
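		/* rebuild the advertised pause bits from the requested
		 * flow control settings and renegotiate
		 */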
2082 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2083 				   ecmd.link_modes.advertising);
2084 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2085 				   ecmd.link_modes.advertising);
2086 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2087 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
2088 		linkmode_or(ecmd.link_modes.advertising, fc,
2089 			    ecmd.link_modes.advertising);
2090 
2091 		phy_ethtool_ksettings_set(phydev, &ecmd);
2092 	}
2093 
2094 	dev->fc_autoneg = pause->autoneg;
2095 
2096 	ret = 0;
2097 exit:
2098 	return ret;
2099 }
2100 
2101 static int lan78xx_get_regs_len(struct net_device *netdev)
2102 {
2103 	return sizeof(lan78xx_regs);
2104 }
2105 
2106 static void
2107 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
2108 		 void *buf)
2109 {
2110 	struct lan78xx_net *dev = netdev_priv(netdev);
2111 	unsigned int data_count = 0;
2112 	u32 *data = buf;
2113 	int i, ret;
2114 
2115 	/* Read Device/MAC registers */
2116 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
2117 		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
2118 		if (ret < 0) {
2119 			netdev_warn(dev->net,
2120 				    "failed to read register 0x%08x\n",
2121 				    lan78xx_regs[i]);
2122 			goto clean_data;
2123 		}
2124 
2125 		data_count++;
2126 	}
2127 
2128 	return;
2129 
2130 clean_data:
2131 	memset(data, 0, data_count * sizeof(u32));
2132 }
2133 
2134 static const struct ethtool_ops lan78xx_ethtool_ops = {
2135 	.get_link	= lan78xx_get_link,
2136 	.nway_reset	= phy_ethtool_nway_reset,
2137 	.get_drvinfo	= lan78xx_get_drvinfo,
2138 	.get_msglevel	= lan78xx_get_msglevel,
2139 	.set_msglevel	= lan78xx_set_msglevel,
2140 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
2141 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
2142 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
2143 	.get_ethtool_stats = lan78xx_get_stats,
2144 	.get_sset_count = lan78xx_get_sset_count,
2145 	.get_strings	= lan78xx_get_strings,
2146 	.get_wol	= lan78xx_get_wol,
2147 	.set_wol	= lan78xx_set_wol,
2148 	.get_ts_info	= ethtool_op_get_ts_info,
2149 	.get_eee	= lan78xx_get_eee,
2150 	.set_eee	= lan78xx_set_eee,
2151 	.get_pauseparam	= lan78xx_get_pause,
2152 	.set_pauseparam	= lan78xx_set_pause,
2153 	.get_link_ksettings = lan78xx_get_link_ksettings,
2154 	.set_link_ksettings = lan78xx_set_link_ksettings,
2155 	.get_regs_len	= lan78xx_get_regs_len,
2156 	.get_regs	= lan78xx_get_regs,
2157 };
2158 
2159 static int lan78xx_init_mac_address(struct lan78xx_net *dev)
2160 {
2161 	u32 addr_lo, addr_hi;
	u8 addr[ETH_ALEN];
2163 	int ret;
2164 
2165 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
2166 	if (ret < 0)
2167 		return ret;
2168 
2169 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
2170 	if (ret < 0)
2171 		return ret;
2172 
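	/* RX_ADDRL/RX_ADDRH hold the MAC address little-endian:
	 * bytes 0..3 in RX_ADDRL, bytes 4..5 in the low half of RX_ADDRH
	 */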
2173 	addr[0] = addr_lo & 0xFF;
2174 	addr[1] = (addr_lo >> 8) & 0xFF;
2175 	addr[2] = (addr_lo >> 16) & 0xFF;
2176 	addr[3] = (addr_lo >> 24) & 0xFF;
2177 	addr[4] = addr_hi & 0xFF;
2178 	addr[5] = (addr_hi >> 8) & 0xFF;
2179 
2180 	if (!is_valid_ether_addr(addr)) {
2181 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
2182 			/* valid address present in Device Tree */
2183 			netif_dbg(dev, ifup, dev->net,
2184 				  "MAC address read from Device Tree");
2185 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
2186 						 ETH_ALEN, addr) == 0) ||
2187 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
2188 					      ETH_ALEN, addr) == 0)) &&
2189 			   is_valid_ether_addr(addr)) {
2190 			/* eeprom values are valid so use them */
2191 			netif_dbg(dev, ifup, dev->net,
2192 				  "MAC address read from EEPROM");
2193 		} else {
2194 			/* generate random MAC */
2195 			eth_random_addr(addr);
2196 			netif_dbg(dev, ifup, dev->net,
2197 				  "MAC address set to random addr");
2198 		}
2199 
2200 		addr_lo = addr[0] | (addr[1] << 8) |
2201 			  (addr[2] << 16) | (addr[3] << 24);
2202 		addr_hi = addr[4] | (addr[5] << 8);
2203 
2204 		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2205 		if (ret < 0)
2206 			return ret;
2207 
2208 		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2209 		if (ret < 0)
2210 			return ret;
2211 	}
2212 
2213 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2214 	if (ret < 0)
2215 		return ret;
2216 
2217 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2218 	if (ret < 0)
2219 		return ret;
2220 
2221 	eth_hw_addr_set(dev->net, addr);
2222 
2223 	return 0;
2224 }
2225 
2226 /* MDIO read and write wrappers for phylib */
2227 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
2228 {
2229 	struct lan78xx_net *dev = bus->priv;
2230 	u32 val, addr;
2231 	int ret;
2232 
2233 	ret = usb_autopm_get_interface(dev->intf);
2234 	if (ret < 0)
2235 		return ret;
2236 
2237 	mutex_lock(&dev->mdiobus_mutex);
2238 
2239 	/* confirm MII not busy */
2240 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2241 	if (ret < 0)
2242 		goto done;
2243 
2244 	/* set the address, index & direction (read from PHY) */
2245 	addr = mii_access(phy_id, idx, MII_READ);
2246 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2247 	if (ret < 0)
2248 		goto done;
2249 
2250 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2251 	if (ret < 0)
2252 		goto done;
2253 
2254 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2255 	if (ret < 0)
2256 		goto done;
2257 
2258 	ret = (int)(val & 0xFFFF);
2259 
2260 done:
2261 	mutex_unlock(&dev->mdiobus_mutex);
2262 	usb_autopm_put_interface(dev->intf);
2263 
2264 	return ret;
2265 }
2266 
2267 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2268 				 u16 regval)
2269 {
2270 	struct lan78xx_net *dev = bus->priv;
2271 	u32 val, addr;
2272 	int ret;
2273 
2274 	ret = usb_autopm_get_interface(dev->intf);
2275 	if (ret < 0)
2276 		return ret;
2277 
2278 	mutex_lock(&dev->mdiobus_mutex);
2279 
2280 	/* confirm MII not busy */
2281 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2282 	if (ret < 0)
2283 		goto done;
2284 
2285 	val = (u32)regval;
2286 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2287 	if (ret < 0)
2288 		goto done;
2289 
2290 	/* set the address, index & direction (write to PHY) */
2291 	addr = mii_access(phy_id, idx, MII_WRITE);
2292 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2293 	if (ret < 0)
2294 		goto done;
2295 
2296 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2297 	if (ret < 0)
2298 		goto done;
2299 
2300 done:
2301 	mutex_unlock(&dev->mdiobus_mutex);
2302 	usb_autopm_put_interface(dev->intf);
2303 	return ret;
2304 }
2305 
2306 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2307 {
2308 	struct device_node *node;
2309 	int ret;
2310 
2311 	dev->mdiobus = mdiobus_alloc();
2312 	if (!dev->mdiobus) {
2313 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2314 		return -ENOMEM;
2315 	}
2316 
2317 	dev->mdiobus->priv = (void *)dev;
2318 	dev->mdiobus->read = lan78xx_mdiobus_read;
2319 	dev->mdiobus->write = lan78xx_mdiobus_write;
2320 	dev->mdiobus->name = "lan78xx-mdiobus";
2321 	dev->mdiobus->parent = &dev->udev->dev;
2322 
2323 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2324 		 dev->udev->bus->busnum, dev->udev->devnum);
2325 
2326 	switch (dev->chipid) {
2327 	case ID_REV_CHIP_ID_7800_:
2328 	case ID_REV_CHIP_ID_7850_:
2329 		/* set to internal PHY id */
2330 		dev->mdiobus->phy_mask = ~(1 << 1);
2331 		break;
2332 	case ID_REV_CHIP_ID_7801_:
		/* scan through PHYAD[2..0] */
2334 		dev->mdiobus->phy_mask = ~(0xFF);
2335 		break;
2336 	}
2337 
2338 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2339 	ret = of_mdiobus_register(dev->mdiobus, node);
2340 	of_node_put(node);
2341 	if (ret) {
2342 		netdev_err(dev->net, "can't register MDIO bus\n");
2343 		goto exit1;
2344 	}
2345 
2346 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2347 	return 0;
2348 exit1:
2349 	mdiobus_free(dev->mdiobus);
2350 	return ret;
2351 }
2352 
2353 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2354 {
2355 	mdiobus_unregister(dev->mdiobus);
2356 	mdiobus_free(dev->mdiobus);
2357 }
2358 
2359 static void lan78xx_link_status_change(struct net_device *net)
2360 {
2361 	struct lan78xx_net *dev = netdev_priv(net);
2362 	struct phy_device *phydev = net->phydev;
2363 	u32 data;
2364 	int ret;
2365 
2366 	ret = lan78xx_read_reg(dev, MAC_CR, &data);
2367 	if (ret < 0)
2368 		return;
2369 
2370 	if (phydev->enable_tx_lpi)
2371 		data |=  MAC_CR_EEE_EN_;
2372 	else
2373 		data &= ~MAC_CR_EEE_EN_;
2374 	lan78xx_write_reg(dev, MAC_CR, data);
2375 
2376 	phy_print_status(phydev);
2377 }
2378 
2379 static int irq_map(struct irq_domain *d, unsigned int irq,
2380 		   irq_hw_number_t hwirq)
2381 {
2382 	struct irq_domain_data *data = d->host_data;
2383 
2384 	irq_set_chip_data(irq, data);
2385 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2386 	irq_set_noprobe(irq);
2387 
2388 	return 0;
2389 }
2390 
2391 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2392 {
2393 	irq_set_chip_and_handler(irq, NULL, NULL);
2394 	irq_set_chip_data(irq, NULL);
2395 }
2396 
2397 static const struct irq_domain_ops chip_domain_ops = {
2398 	.map	= irq_map,
2399 	.unmap	= irq_unmap,
2400 };
2401 
2402 static void lan78xx_irq_mask(struct irq_data *irqd)
2403 {
2404 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2405 
2406 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2407 }
2408 
2409 static void lan78xx_irq_unmask(struct irq_data *irqd)
2410 {
2411 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2412 
2413 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2414 }
2415 
2416 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2417 {
2418 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2419 
2420 	mutex_lock(&data->irq_lock);
2421 }
2422 
2423 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2424 {
2425 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2426 	struct lan78xx_net *dev =
2427 			container_of(data, struct lan78xx_net, domain_data);
2428 	u32 buf;
2429 	int ret;
2430 
	/* Do register access here because irq_bus_lock and
	 * irq_bus_sync_unlock are the only two callbacks executed in a
	 * non-atomic context.
	 */
2434 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2435 	if (ret < 0)
2436 		goto irq_bus_sync_unlock;
2437 
2438 	if (buf != data->irqenable)
2439 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2440 
2441 irq_bus_sync_unlock:
2442 	if (ret < 0)
2443 		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
2444 			   ERR_PTR(ret));
2445 
2446 	mutex_unlock(&data->irq_lock);
2447 }
2448 
2449 static struct irq_chip lan78xx_irqchip = {
2450 	.name			= "lan78xx-irqs",
2451 	.irq_mask		= lan78xx_irq_mask,
2452 	.irq_unmask		= lan78xx_irq_unmask,
2453 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2454 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2455 };
2456 
2457 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2458 {
2459 	struct irq_domain *irqdomain;
2460 	unsigned int irqmap = 0;
2461 	u32 buf;
2462 	int ret = 0;
2463 
2464 	mutex_init(&dev->domain_data.irq_lock);
2465 
2466 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2467 	if (ret < 0)
2468 		return ret;
2469 
2470 	dev->domain_data.irqenable = buf;
2471 
2472 	dev->domain_data.irqchip = &lan78xx_irqchip;
2473 	dev->domain_data.irq_handler = handle_simple_irq;
2474 
2475 	irqdomain = irq_domain_create_simple(of_fwnode_handle(dev->udev->dev.parent->of_node),
2476 					     MAX_INT_EP, 0,
2477 					     &chip_domain_ops,
2478 					     &dev->domain_data);
2479 	if (irqdomain) {
2480 		/* create mapping for PHY interrupt */
2481 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2482 		if (!irqmap) {
2483 			irq_domain_remove(irqdomain);
2484 
2485 			irqdomain = NULL;
2486 			ret = -EINVAL;
2487 		}
2488 	} else {
2489 		ret = -EINVAL;
2490 	}
2491 
2492 	dev->domain_data.irqdomain = irqdomain;
2493 	dev->domain_data.phyirq = irqmap;
2494 
2495 	return ret;
2496 }
2497 
2498 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2499 {
2500 	if (dev->domain_data.phyirq > 0) {
2501 		irq_dispose_mapping(dev->domain_data.phyirq);
2502 
2503 		if (dev->domain_data.irqdomain)
2504 			irq_domain_remove(dev->domain_data.irqdomain);
2505 	}
2506 	dev->domain_data.phyirq = 0;
2507 	dev->domain_data.irqdomain = NULL;
2508 }
2509 
2510 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2511 {
2512 	u32 buf;
2513 	int ret;
2514 	struct fixed_phy_status fphy_status = {
2515 		.link = 1,
2516 		.speed = SPEED_1000,
2517 		.duplex = DUPLEX_FULL,
2518 	};
2519 	struct phy_device *phydev;
2520 
2521 	phydev = phy_find_first(dev->mdiobus);
2522 	if (!phydev) {
2523 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2524 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2525 		if (IS_ERR(phydev)) {
2526 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2527 			return NULL;
2528 		}
2529 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2530 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2531 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2532 					MAC_RGMII_ID_TXC_DELAY_EN_);
2533 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2534 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2535 		buf |= HW_CFG_CLK125_EN_;
2536 		buf |= HW_CFG_REFCLK25_EN_;
2537 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2538 	} else {
2539 		if (!phydev->drv) {
2540 			netdev_err(dev->net, "no PHY driver found\n");
2541 			return NULL;
2542 		}
2543 		dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
		/* The PHY driver is responsible for configuring proper RGMII
		 * interface delays. Disable RGMII delays on the MAC side.
		 */
2547 		lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2548 
2549 		phydev->is_internal = false;
2550 	}
2551 	return phydev;
2552 }
2553 
2554 static int lan78xx_phy_init(struct lan78xx_net *dev)
2555 {
2556 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2557 	int ret;
2558 	u32 mii_adv;
2559 	struct phy_device *phydev;
2560 
2561 	switch (dev->chipid) {
2562 	case ID_REV_CHIP_ID_7801_:
2563 		phydev = lan7801_phy_init(dev);
2564 		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY init failed\n");
2566 			return -EIO;
2567 		}
2568 		break;
2569 
2570 	case ID_REV_CHIP_ID_7800_:
2571 	case ID_REV_CHIP_ID_7850_:
2572 		phydev = phy_find_first(dev->mdiobus);
2573 		if (!phydev) {
2574 			netdev_err(dev->net, "no PHY found\n");
2575 			return -EIO;
2576 		}
2577 		phydev->is_internal = true;
2578 		dev->interface = PHY_INTERFACE_MODE_GMII;
2579 		break;
2580 
2581 	default:
2582 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2583 		return -EIO;
2584 	}
2585 
2586 	/* if phyirq is not set, use polling mode in phylib */
2587 	if (dev->domain_data.phyirq > 0)
2588 		phydev->irq = dev->domain_data.phyirq;
2589 	else
2590 		phydev->irq = PHY_POLL;
2591 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2592 
2593 	/* set to AUTOMDIX */
2594 	phydev->mdix = ETH_TP_MDI_AUTO;
2595 
2596 	ret = phy_connect_direct(dev->net, phydev,
2597 				 lan78xx_link_status_change,
2598 				 dev->interface);
2599 	if (ret) {
2600 		netdev_err(dev->net, "can't attach PHY to %s\n",
2601 			   dev->mdiobus->id);
2602 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2603 			if (phy_is_pseudo_fixed_link(phydev)) {
2604 				fixed_phy_unregister(phydev);
2605 				phy_device_free(phydev);
2606 			}
2607 		}
2608 		return -EIO;
2609 	}
2610 
2611 	/* MAC doesn't support 1000T Half */
2612 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2613 
2614 	/* support both flow controls */
2615 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2616 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2617 			   phydev->advertising);
2618 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2619 			   phydev->advertising);
2620 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2621 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2622 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2623 
2624 	phy_support_eee(phydev);
2625 
2626 	if (phydev->mdio.dev.of_node) {
2627 		u32 reg;
2628 		int len;
2629 
2630 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2631 						      "microchip,led-modes",
2632 						      sizeof(u32));
2633 		if (len >= 0) {
2634 			/* Ensure the appropriate LEDs are enabled */
2635 			lan78xx_read_reg(dev, HW_CFG, &reg);
2636 			reg &= ~(HW_CFG_LED0_EN_ |
2637 				 HW_CFG_LED1_EN_ |
2638 				 HW_CFG_LED2_EN_ |
2639 				 HW_CFG_LED3_EN_);
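			/* (len > n) evaluates to 0 or 1, so LEDn is enabled
			 * only when the DT property lists a mode for it
			 */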
2640 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2641 				(len > 1) * HW_CFG_LED1_EN_ |
2642 				(len > 2) * HW_CFG_LED2_EN_ |
2643 				(len > 3) * HW_CFG_LED3_EN_;
2644 			lan78xx_write_reg(dev, HW_CFG, reg);
2645 		}
2646 	}
2647 
2648 	genphy_config_aneg(phydev);
2649 
2650 	dev->fc_autoneg = phydev->autoneg;
2651 
2652 	return 0;
2653 }
2654 
2655 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2656 {
2657 	bool rxenabled;
2658 	u32 buf;
2659 	int ret;
2660 
2661 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2662 	if (ret < 0)
2663 		return ret;
2664 
2665 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2666 
2667 	if (rxenabled) {
2668 		buf &= ~MAC_RX_RXEN_;
2669 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2670 		if (ret < 0)
2671 			return ret;
2672 	}
2673 
2674 	/* add 4 to size for FCS */
2675 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2676 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2677 
2678 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2679 	if (ret < 0)
2680 		return ret;
2681 
2682 	if (rxenabled) {
2683 		buf |= MAC_RX_RXEN_;
2684 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2685 		if (ret < 0)
2686 			return ret;
2687 	}
2688 
2689 	return 0;
2690 }
2691 
2692 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2693 {
2694 	struct sk_buff *skb;
2695 	unsigned long flags;
2696 	int count = 0;
2697 
2698 	spin_lock_irqsave(&q->lock, flags);
2699 	while (!skb_queue_empty(q)) {
2700 		struct skb_data	*entry;
2701 		struct urb *urb;
2702 		int ret;
2703 
2704 		skb_queue_walk(q, skb) {
2705 			entry = (struct skb_data *)skb->cb;
2706 			if (entry->state != unlink_start)
2707 				goto found;
2708 		}
2709 		break;
2710 found:
2711 		entry->state = unlink_start;
2712 		urb = entry->urb;
2713 
		/* Take a reference on the URB so it cannot be freed while
		 * usb_unlink_urb() runs. usb_unlink_urb() always races with
		 * the .complete handler (including defer_bh), so releasing
		 * the URB early could cause a use-after-free inside
		 * usb_unlink_urb().
		 */
2720 		usb_get_urb(urb);
2721 		spin_unlock_irqrestore(&q->lock, flags);
2722 		/* during some PM-driven resume scenarios,
2723 		 * these (async) unlinks complete immediately
2724 		 */
2725 		ret = usb_unlink_urb(urb);
2726 		if (ret != -EINPROGRESS && ret != 0)
2727 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2728 		else
2729 			count++;
2730 		usb_put_urb(urb);
2731 		spin_lock_irqsave(&q->lock, flags);
2732 	}
2733 	spin_unlock_irqrestore(&q->lock, flags);
2734 	return count;
2735 }
2736 
2737 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2738 {
2739 	struct lan78xx_net *dev = netdev_priv(netdev);
2740 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2741 	int ret;
2742 
2743 	/* no second zero-length packet read wanted after mtu-sized packets */
2744 	if ((max_frame_len % dev->maxpacket) == 0)
2745 		return -EDOM;
2746 
2747 	ret = usb_autopm_get_interface(dev->intf);
2748 	if (ret < 0)
2749 		return ret;
2750 
2751 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2752 	if (ret < 0)
		netdev_err(dev->net, "failed to change MTU to %d from %d: %pe\n",
			   new_mtu, netdev->mtu, ERR_PTR(ret));
2755 	else
2756 		WRITE_ONCE(netdev->mtu, new_mtu);
2757 
2758 	usb_autopm_put_interface(dev->intf);
2759 
2760 	return ret;
2761 }
2762 
2763 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2764 {
2765 	struct lan78xx_net *dev = netdev_priv(netdev);
2766 	struct sockaddr *addr = p;
2767 	u32 addr_lo, addr_hi;
2768 	int ret;
2769 
2770 	if (netif_running(netdev))
2771 		return -EBUSY;
2772 
2773 	if (!is_valid_ether_addr(addr->sa_data))
2774 		return -EADDRNOTAVAIL;
2775 
2776 	eth_hw_addr_set(netdev, addr->sa_data);
2777 
2778 	addr_lo = netdev->dev_addr[0] |
2779 		  netdev->dev_addr[1] << 8 |
2780 		  netdev->dev_addr[2] << 16 |
2781 		  netdev->dev_addr[3] << 24;
2782 	addr_hi = netdev->dev_addr[4] |
2783 		  netdev->dev_addr[5] << 8;
2784 
2785 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2786 	if (ret < 0)
2787 		return ret;
2788 
2789 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2790 	if (ret < 0)
2791 		return ret;
2792 
	/* update perfect-address filter entry 0 to match the new address */
2794 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2795 	if (ret < 0)
2796 		return ret;
2797 
2798 	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2799 }
2800 
/* Enable or disable Rx checksum offload and VLAN acceleration features */
2802 static int lan78xx_set_features(struct net_device *netdev,
2803 				netdev_features_t features)
2804 {
2805 	struct lan78xx_net *dev = netdev_priv(netdev);
2806 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2807 	unsigned long flags;
2808 
2809 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2810 
2811 	if (features & NETIF_F_RXCSUM) {
2812 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2813 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2814 	} else {
2815 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2816 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2817 	}
2818 
2819 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2820 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2821 	else
2822 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2823 
2824 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2825 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2826 	else
2827 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2828 
2829 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2830 
2831 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2832 }
2833 
2834 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2835 {
2836 	struct lan78xx_priv *pdata =
2837 			container_of(param, struct lan78xx_priv, set_vlan);
2838 	struct lan78xx_net *dev = pdata->dev;
2839 
2840 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2841 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2842 }
2843 
2844 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2845 				   __be16 proto, u16 vid)
2846 {
2847 	struct lan78xx_net *dev = netdev_priv(netdev);
2848 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2849 	u16 vid_bit_index;
2850 	u16 vid_dword_index;
2851 
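	/* the 4096-bit VLAN table is stored as 128 32-bit words:
	 * word index = vid / 32, bit index = vid % 32
	 */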
2852 	vid_dword_index = (vid >> 5) & 0x7F;
2853 	vid_bit_index = vid & 0x1F;
2854 
2855 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2856 
2857 	/* defer register writes to a sleepable context */
2858 	schedule_work(&pdata->set_vlan);
2859 
2860 	return 0;
2861 }
2862 
2863 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2864 				    __be16 proto, u16 vid)
2865 {
2866 	struct lan78xx_net *dev = netdev_priv(netdev);
2867 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2868 	u16 vid_bit_index;
2869 	u16 vid_dword_index;
2870 
2871 	vid_dword_index = (vid >> 5) & 0x7F;
2872 	vid_bit_index = vid & 0x1F;
2873 
2874 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2875 
2876 	/* defer register writes to a sleepable context */
2877 	schedule_work(&pdata->set_vlan);
2878 
2879 	return 0;
2880 }
2881 
2882 static int lan78xx_init_ltm(struct lan78xx_net *dev)
2883 {
2884 	u32 regs[6] = { 0 };
2885 	int ret;
2886 	u32 buf;
2887 
2888 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2889 	if (ret < 0)
2890 		goto init_ltm_failed;
2891 
2892 	if (buf & USB_CFG1_LTM_ENABLE_) {
2893 		u8 temp[2];
2894 		/* Get values from EEPROM first */
2895 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
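			/* a stored length of 24 bytes matches the six
			 * 32-bit LTM registers programmed below
			 */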
2896 			if (temp[0] == 24) {
2897 				ret = lan78xx_read_raw_eeprom(dev,
2898 							      temp[1] * 2,
2899 							      24,
2900 							      (u8 *)regs);
2901 				if (ret < 0)
2902 					return ret;
2903 			}
2904 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2905 			if (temp[0] == 24) {
2906 				ret = lan78xx_read_raw_otp(dev,
2907 							   temp[1] * 2,
2908 							   24,
2909 							   (u8 *)regs);
2910 				if (ret < 0)
2911 					return ret;
2912 			}
2913 		}
2914 	}
2915 
2916 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2917 	if (ret < 0)
2918 		goto init_ltm_failed;
2919 
2920 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2921 	if (ret < 0)
2922 		goto init_ltm_failed;
2923 
2924 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2925 	if (ret < 0)
2926 		goto init_ltm_failed;
2927 
2928 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2929 	if (ret < 0)
2930 		goto init_ltm_failed;
2931 
2932 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2933 	if (ret < 0)
2934 		goto init_ltm_failed;
2935 
2936 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2937 	if (ret < 0)
2938 		goto init_ltm_failed;
2939 
2940 	return 0;
2941 
2942 init_ltm_failed:
2943 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
2944 	return ret;
2945 }
2946 
2947 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2948 {
2949 	int result = 0;
2950 
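	/* pick per-speed URB sizes and counts; burst_cap is programmed
	 * in units of the endpoint packet size
	 */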
2951 	switch (dev->udev->speed) {
2952 	case USB_SPEED_SUPER:
2953 		dev->rx_urb_size = RX_SS_URB_SIZE;
2954 		dev->tx_urb_size = TX_SS_URB_SIZE;
2955 		dev->n_rx_urbs = RX_SS_URB_NUM;
2956 		dev->n_tx_urbs = TX_SS_URB_NUM;
2957 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2958 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2959 		break;
2960 	case USB_SPEED_HIGH:
2961 		dev->rx_urb_size = RX_HS_URB_SIZE;
2962 		dev->tx_urb_size = TX_HS_URB_SIZE;
2963 		dev->n_rx_urbs = RX_HS_URB_NUM;
2964 		dev->n_tx_urbs = TX_HS_URB_NUM;
2965 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2966 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2967 		break;
2968 	case USB_SPEED_FULL:
2969 		dev->rx_urb_size = RX_FS_URB_SIZE;
2970 		dev->tx_urb_size = TX_FS_URB_SIZE;
2971 		dev->n_rx_urbs = RX_FS_URB_NUM;
2972 		dev->n_tx_urbs = TX_FS_URB_NUM;
2973 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2974 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2975 		break;
2976 	default:
2977 		netdev_warn(dev->net, "USB bus speed not supported\n");
2978 		result = -EIO;
2979 		break;
2980 	}
2981 
2982 	return result;
2983 }
2984 
2985 static int lan78xx_reset(struct lan78xx_net *dev)
2986 {
2987 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2988 	unsigned long timeout;
2989 	int ret;
2990 	u32 buf;
2991 	u8 sig;
2992 
2993 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2994 	if (ret < 0)
2995 		return ret;
2996 
2997 	buf |= HW_CFG_LRST_;
2998 
2999 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3000 	if (ret < 0)
3001 		return ret;
3002 
3003 	timeout = jiffies + HZ;
3004 	do {
3005 		mdelay(1);
3006 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3007 		if (ret < 0)
3008 			return ret;
3009 
3010 		if (time_after(jiffies, timeout)) {
3011 			netdev_warn(dev->net,
				    "timeout on completion of LiteReset\n");
3013 			ret = -ETIMEDOUT;
3014 			return ret;
3015 		}
3016 	} while (buf & HW_CFG_LRST_);
3017 
3018 	ret = lan78xx_init_mac_address(dev);
3019 	if (ret < 0)
3020 		return ret;
3021 
3022 	/* save DEVID for later usage */
3023 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
3024 	if (ret < 0)
3025 		return ret;
3026 
3027 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
3028 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
3029 
3030 	/* Respond to the IN token with a NAK */
3031 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3032 	if (ret < 0)
3033 		return ret;
3034 
3035 	buf |= USB_CFG_BIR_;
3036 
3037 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3038 	if (ret < 0)
3039 		return ret;
3040 
3041 	/* Init LTM */
3042 	ret = lan78xx_init_ltm(dev);
3043 	if (ret < 0)
3044 		return ret;
3045 
3046 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
3047 	if (ret < 0)
3048 		return ret;
3049 
3050 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
3051 	if (ret < 0)
3052 		return ret;
3053 
3054 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3055 	if (ret < 0)
3056 		return ret;
3057 
3058 	buf |= HW_CFG_MEF_;
3059 	buf |= HW_CFG_CLK125_EN_;
3060 	buf |= HW_CFG_REFCLK25_EN_;
3061 
3062 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3063 	if (ret < 0)
3064 		return ret;
3065 
3066 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3067 	if (ret < 0)
3068 		return ret;
3069 
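	/* BCE: enable burst cap so the BURST_CAP value set above is used */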
3070 	buf |= USB_CFG_BCE_;
3071 
3072 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3073 	if (ret < 0)
3074 		return ret;
3075 
	/* set FIFO end addresses, in units of 512-byte blocks */
3077 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
3078 
3079 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
3080 	if (ret < 0)
3081 		return ret;
3082 
3083 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
3084 
3085 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
3086 	if (ret < 0)
3087 		return ret;
3088 
3089 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3090 	if (ret < 0)
3091 		return ret;
3092 
3093 	ret = lan78xx_write_reg(dev, FLOW, 0);
3094 	if (ret < 0)
3095 		return ret;
3096 
3097 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3098 	if (ret < 0)
3099 		return ret;
3100 
3101 	/* Don't need rfe_ctl_lock during initialisation */
3102 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3103 	if (ret < 0)
3104 		return ret;
3105 
3106 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3107 
3108 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3109 	if (ret < 0)
3110 		return ret;
3111 
3112 	/* Enable or disable checksum offload engines */
3113 	ret = lan78xx_set_features(dev->net, dev->net->features);
3114 	if (ret < 0)
3115 		return ret;
3116 
3117 	lan78xx_set_multicast(dev->net);
3118 
3119 	/* reset PHY */
3120 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3121 	if (ret < 0)
3122 		return ret;
3123 
3124 	buf |= PMT_CTL_PHY_RST_;
3125 
3126 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3127 	if (ret < 0)
3128 		return ret;
3129 
3130 	timeout = jiffies + HZ;
3131 	do {
3132 		mdelay(1);
3133 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3134 		if (ret < 0)
3135 			return ret;
3136 
3137 		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
3139 			ret = -ETIMEDOUT;
3140 			return ret;
3141 		}
3142 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3143 
3144 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3145 	if (ret < 0)
3146 		return ret;
3147 
3148 	/* LAN7801 only has RGMII mode */
3149 	if (dev->chipid == ID_REV_CHIP_ID_7801_) {
3150 		buf &= ~MAC_CR_GMII_EN_;
3151 		/* Enable Auto Duplex and Auto speed */
3152 		buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3153 	}
3154 
3155 	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
3156 	    dev->chipid == ID_REV_CHIP_ID_7850_) {
3157 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
3158 		if (!ret && sig != EEPROM_INDICATOR) {
			/* implies there is no external EEPROM; set MAC speed */
			netdev_info(dev->net, "no external EEPROM, setting MAC speed\n");
3161 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3162 		}
3163 	}
3164 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3165 	if (ret < 0)
3166 		return ret;
3167 
3168 	ret = lan78xx_set_rx_max_frame_length(dev,
3169 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3170 
3171 	return ret;
3172 }
3173 
3174 static void lan78xx_init_stats(struct lan78xx_net *dev)
3175 {
3176 	u32 *p;
3177 	int i;
3178 
	/* initialize rollover limits for the stats update:
	 * some hardware counters are 20 bits wide and some are 32 bits
	 */
3182 	p = (u32 *)&dev->stats.rollover_max;
3183 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3184 		p[i] = 0xFFFFF;
3185 
3186 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3187 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3188 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3189 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3190 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3191 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3192 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3193 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3194 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3195 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3196 
3197 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3198 }
3199 
3200 static int lan78xx_open(struct net_device *net)
3201 {
3202 	struct lan78xx_net *dev = netdev_priv(net);
3203 	int ret;
3204 
3205 	netif_dbg(dev, ifup, dev->net, "open device");
3206 
3207 	ret = usb_autopm_get_interface(dev->intf);
3208 	if (ret < 0)
3209 		return ret;
3210 
3211 	mutex_lock(&dev->dev_mutex);
3212 
3213 	phy_start(net->phydev);
3214 
3215 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
3216 
3217 	/* for Link Check */
3218 	if (dev->urb_intr) {
3219 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3220 		if (ret < 0) {
3221 			netif_err(dev, ifup, dev->net,
3222 				  "intr submit %d\n", ret);
3223 			goto done;
3224 		}
3225 	}
3226 
3227 	ret = lan78xx_flush_rx_fifo(dev);
3228 	if (ret < 0)
3229 		goto done;
3230 	ret = lan78xx_flush_tx_fifo(dev);
3231 	if (ret < 0)
3232 		goto done;
3233 
3234 	ret = lan78xx_start_tx_path(dev);
3235 	if (ret < 0)
3236 		goto done;
3237 	ret = lan78xx_start_rx_path(dev);
3238 	if (ret < 0)
3239 		goto done;
3240 
3241 	lan78xx_init_stats(dev);
3242 
3243 	set_bit(EVENT_DEV_OPEN, &dev->flags);
3244 
3245 	netif_start_queue(net);
3246 
3247 	dev->link_on = false;
3248 
3249 	napi_enable(&dev->napi);
3250 
3251 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
3252 done:
3253 	mutex_unlock(&dev->dev_mutex);
3254 
3255 	if (ret < 0)
3256 		usb_autopm_put_interface(dev->intf);
3257 
3258 	return ret;
3259 }
3260 
3261 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3262 {
3263 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3264 	DECLARE_WAITQUEUE(wait, current);
3265 	int temp;
3266 
3267 	/* ensure there are no more active urbs */
3268 	add_wait_queue(&unlink_wakeup, &wait);
3269 	set_current_state(TASK_UNINTERRUPTIBLE);
3270 	dev->wait = &unlink_wakeup;
3271 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3272 
3273 	/* maybe wait for deletions to finish. */
3274 	while (!skb_queue_empty(&dev->rxq) ||
3275 	       !skb_queue_empty(&dev->txq)) {
3276 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3277 		set_current_state(TASK_UNINTERRUPTIBLE);
3278 		netif_dbg(dev, ifdown, dev->net,
3279 			  "waited for %d urb completions", temp);
3280 	}
3281 	set_current_state(TASK_RUNNING);
3282 	dev->wait = NULL;
3283 	remove_wait_queue(&unlink_wakeup, &wait);
3284 
3285 	/* empty Rx done, Rx overflow and Tx pend queues
3286 	 */
3287 	while (!skb_queue_empty(&dev->rxq_done)) {
3288 		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3289 
3290 		lan78xx_release_rx_buf(dev, skb);
3291 	}
3292 
3293 	skb_queue_purge(&dev->rxq_overflow);
3294 	skb_queue_purge(&dev->txq_pend);
3295 }
3296 
3297 static int lan78xx_stop(struct net_device *net)
3298 {
3299 	struct lan78xx_net *dev = netdev_priv(net);
3300 
3301 	netif_dbg(dev, ifup, dev->net, "stop device");
3302 
3303 	mutex_lock(&dev->dev_mutex);
3304 
3305 	if (timer_pending(&dev->stat_monitor))
3306 		timer_delete_sync(&dev->stat_monitor);
3307 
3308 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3309 	netif_stop_queue(net);
3310 	napi_disable(&dev->napi);
3311 
3312 	lan78xx_terminate_urbs(dev);
3313 
3314 	netif_info(dev, ifdown, dev->net,
3315 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3316 		   net->stats.rx_packets, net->stats.tx_packets,
3317 		   net->stats.rx_errors, net->stats.tx_errors);
3318 
3319 	/* ignore errors that occur stopping the Tx and Rx data paths */
3320 	lan78xx_stop_tx_path(dev);
3321 	lan78xx_stop_rx_path(dev);
3322 
3323 	if (net->phydev)
3324 		phy_stop(net->phydev);
3325 
3326 	usb_kill_urb(dev->urb_intr);
3327 
3328 	/* deferred work (task, timer, softirq) must also stop.
3329 	 * can't flush_scheduled_work() until we drop rtnl (later),
3330 	 * else workers could deadlock; so make workers a NOP.
3331 	 */
3332 	clear_bit(EVENT_TX_HALT, &dev->flags);
3333 	clear_bit(EVENT_RX_HALT, &dev->flags);
3334 	clear_bit(EVENT_LINK_RESET, &dev->flags);
3335 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3336 
3337 	cancel_delayed_work_sync(&dev->wq);
3338 
3339 	usb_autopm_put_interface(dev->intf);
3340 
3341 	mutex_unlock(&dev->dev_mutex);
3342 
3343 	return 0;
3344 }
3345 
3346 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3347 			       struct sk_buff_head *list, enum skb_state state)
3348 {
3349 	unsigned long flags;
3350 	enum skb_state old_state;
3351 	struct skb_data *entry = (struct skb_data *)skb->cb;
3352 
3353 	spin_lock_irqsave(&list->lock, flags);
3354 	old_state = entry->state;
3355 	entry->state = state;
3356 
3357 	__skb_unlink(skb, list);
3358 	spin_unlock(&list->lock);
3359 	spin_lock(&dev->rxq_done.lock);
3360 
3361 	__skb_queue_tail(&dev->rxq_done, skb);
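	/* schedule NAPI only on the empty-to-non-empty transition; an
	 * already-scheduled poll will drain later additions
	 */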
3362 	if (skb_queue_len(&dev->rxq_done) == 1)
3363 		napi_schedule(&dev->napi);
3364 
3365 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3366 
3367 	return old_state;
3368 }
3369 
3370 static void tx_complete(struct urb *urb)
3371 {
3372 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3373 	struct skb_data *entry = (struct skb_data *)skb->cb;
3374 	struct lan78xx_net *dev = entry->dev;
3375 
3376 	if (urb->status == 0) {
3377 		dev->net->stats.tx_packets += entry->num_of_packet;
3378 		dev->net->stats.tx_bytes += entry->length;
3379 	} else {
3380 		dev->net->stats.tx_errors += entry->num_of_packet;
3381 
3382 		switch (urb->status) {
3383 		case -EPIPE:
3384 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3385 			break;
3386 
3387 		/* software-driven interface shutdown */
3388 		case -ECONNRESET:
3389 		case -ESHUTDOWN:
3390 			netif_dbg(dev, tx_err, dev->net,
3391 				  "tx err interface gone %d\n",
3392 				  entry->urb->status);
3393 			break;
3394 
3395 		case -EPROTO:
3396 		case -ETIME:
3397 		case -EILSEQ:
3398 			netif_stop_queue(dev->net);
3399 			netif_dbg(dev, tx_err, dev->net,
3400 				  "tx err queue stopped %d\n",
3401 				  entry->urb->status);
3402 			break;
3403 		default:
3404 			netif_dbg(dev, tx_err, dev->net,
3405 				  "unknown tx err %d\n",
3406 				  entry->urb->status);
3407 			break;
3408 		}
3409 	}
3410 
3411 	usb_autopm_put_interface_async(dev->intf);
3412 
3413 	skb_unlink(skb, &dev->txq);
3414 
3415 	lan78xx_release_tx_buf(dev, skb);
3416 
3417 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3418 	 */
3419 	if (skb_queue_empty(&dev->txq) &&
3420 	    !skb_queue_empty(&dev->txq_pend))
3421 		napi_schedule(&dev->napi);
3422 }
3423 
3424 static void lan78xx_queue_skb(struct sk_buff_head *list,
3425 			      struct sk_buff *newsk, enum skb_state state)
3426 {
3427 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3428 
3429 	__skb_queue_tail(list, newsk);
3430 	entry->state = state;
3431 }
3432 
3433 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3434 {
3435 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3436 }
3437 
3438 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3439 {
3440 	return dev->tx_pend_data_len;
3441 }
3442 
3443 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3444 				    struct sk_buff *skb,
3445 				    unsigned int *tx_pend_data_len)
3446 {
3447 	unsigned long flags;
3448 
3449 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3450 
3451 	__skb_queue_tail(&dev->txq_pend, skb);
3452 
3453 	dev->tx_pend_data_len += skb->len;
3454 	*tx_pend_data_len = dev->tx_pend_data_len;
3455 
3456 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3457 }
3458 
3459 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3460 					 struct sk_buff *skb,
3461 					 unsigned int *tx_pend_data_len)
3462 {
3463 	unsigned long flags;
3464 
3465 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3466 
3467 	__skb_queue_head(&dev->txq_pend, skb);
3468 
3469 	dev->tx_pend_data_len += skb->len;
3470 	*tx_pend_data_len = dev->tx_pend_data_len;
3471 
3472 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3473 }
3474 
3475 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3476 				    struct sk_buff **skb,
3477 				    unsigned int *tx_pend_data_len)
3478 {
3479 	unsigned long flags;
3480 
3481 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3482 
3483 	*skb = __skb_dequeue(&dev->txq_pend);
3484 	if (*skb)
3485 		dev->tx_pend_data_len -= (*skb)->len;
3486 	*tx_pend_data_len = dev->tx_pend_data_len;
3487 
3488 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3489 }
3490 
3491 static netdev_tx_t
3492 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3493 {
3494 	struct lan78xx_net *dev = netdev_priv(net);
3495 	unsigned int tx_pend_data_len;
3496 
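	/* device is autosuspended: kick the deferred work to trigger resume */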
3497 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3498 		schedule_delayed_work(&dev->wq, 0);
3499 
3500 	skb_tx_timestamp(skb);
3501 
3502 	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3503 
3504 	/* Set up a Tx URB if none is in progress */
3505 
3506 	if (skb_queue_empty(&dev->txq))
3507 		napi_schedule(&dev->napi);
3508 
3509 	/* Stop stack Tx queue if we have enough data to fill
3510 	 * all the free Tx URBs.
3511 	 */
3512 	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3513 		netif_stop_queue(net);
3514 
3515 		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3516 			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3517 
3518 		/* Kick off transmission of pending data */
3519 
3520 		if (!skb_queue_empty(&dev->txq_free))
3521 			napi_schedule(&dev->napi);
3522 	}
3523 
3524 	return NETDEV_TX_OK;
3525 }
3526 
3527 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3528 {
3529 	struct lan78xx_priv *pdata = NULL;
3530 	int ret;
3531 	int i;
3532 
3533 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3534 
3535 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3536 	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv\n");
3538 		return -ENOMEM;
3539 	}
3540 
3541 	pdata->dev = dev;
3542 
3543 	spin_lock_init(&pdata->rfe_ctl_lock);
3544 	mutex_init(&pdata->dataport_mutex);
3545 
3546 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3547 
3548 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3549 		pdata->vlan_table[i] = 0;
3550 
3551 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3552 
3553 	dev->net->features = 0;
3554 
3555 	if (DEFAULT_TX_CSUM_ENABLE)
3556 		dev->net->features |= NETIF_F_HW_CSUM;
3557 
3558 	if (DEFAULT_RX_CSUM_ENABLE)
3559 		dev->net->features |= NETIF_F_RXCSUM;
3560 
3561 	if (DEFAULT_TSO_CSUM_ENABLE)
3562 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3563 
3564 	if (DEFAULT_VLAN_RX_OFFLOAD)
3565 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3566 
3567 	if (DEFAULT_VLAN_FILTER_ENABLE)
3568 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3569 
3570 	dev->net->hw_features = dev->net->features;
3571 
3572 	ret = lan78xx_setup_irq_domain(dev);
3573 	if (ret < 0) {
3574 		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed: %d\n", ret);
3576 		goto out1;
3577 	}
3578 
3579 	/* Init all registers */
3580 	ret = lan78xx_reset(dev);
3581 	if (ret) {
		netdev_warn(dev->net, "register initialization failed\n");
3583 		goto out2;
3584 	}
3585 
3586 	ret = lan78xx_mdio_init(dev);
3587 	if (ret) {
		netdev_warn(dev->net, "MDIO initialization failed\n");
3589 		goto out2;
3590 	}
3591 
3592 	dev->net->flags |= IFF_MULTICAST;
3593 
3594 	pdata->wol = WAKE_MAGIC;
3595 
3596 	return ret;
3597 
3598 out2:
3599 	lan78xx_remove_irq_domain(dev);
3600 
3601 out1:
	netdev_warn(dev->net, "bind routine failed\n");
3603 	cancel_work_sync(&pdata->set_multicast);
3604 	cancel_work_sync(&pdata->set_vlan);
3605 	kfree(pdata);
3606 	return ret;
3607 }
3608 
3609 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3610 {
3611 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3612 
3613 	lan78xx_remove_irq_domain(dev);
3614 
3615 	lan78xx_remove_mdio(dev);
3616 
3617 	if (pdata) {
3618 		cancel_work_sync(&pdata->set_multicast);
3619 		cancel_work_sync(&pdata->set_vlan);
3620 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3621 		kfree(pdata);
3622 		pdata = NULL;
3623 		dev->data[0] = 0;
3624 	}
3625 }
3626 
3627 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3628 				    struct sk_buff *skb,
3629 				    u32 rx_cmd_a, u32 rx_cmd_b)
3630 {
3631 	/* HW Checksum offload appears to be flawed if used when not stripping
3632 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3633 	 */
3634 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3635 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3636 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3637 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3638 		skb->ip_summed = CHECKSUM_NONE;
3639 	} else {
3640 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3641 		skb->ip_summed = CHECKSUM_COMPLETE;
3642 	}
3643 }
3644 
3645 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3646 				    struct sk_buff *skb,
3647 				    u32 rx_cmd_a, u32 rx_cmd_b)
3648 {
3649 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3650 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3651 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3652 				       (rx_cmd_b & 0xffff));
3653 }
3654 
3655 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3656 {
3657 	dev->net->stats.rx_packets++;
3658 	dev->net->stats.rx_bytes += skb->len;
3659 
3660 	skb->protocol = eth_type_trans(skb, dev->net);
3661 
3662 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3663 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3664 	memset(skb->cb, 0, sizeof(struct skb_data));
3665 
3666 	if (skb_defer_rx_timestamp(skb))
3667 		return;
3668 
3669 	napi_gro_receive(&dev->napi, skb);
3670 }
3671 
3672 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3673 		      int budget, int *work_done)
3674 {
3675 	if (skb->len < RX_SKB_MIN_LEN)
3676 		return 0;
3677 
3678 	/* Extract frames from the URB buffer and pass each one to
3679 	 * the stack in a new NAPI SKB.
3680 	 */
3681 	while (skb->len > 0) {
3682 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3683 		u16 rx_cmd_c;
3684 		unsigned char *packet;
3685 
3686 		rx_cmd_a = get_unaligned_le32(skb->data);
3687 		skb_pull(skb, sizeof(rx_cmd_a));
3688 
3689 		rx_cmd_b = get_unaligned_le32(skb->data);
3690 		skb_pull(skb, sizeof(rx_cmd_b));
3691 
3692 		rx_cmd_c = get_unaligned_le16(skb->data);
3693 		skb_pull(skb, sizeof(rx_cmd_c));
3694 
3695 		packet = skb->data;
3696 
3697 		/* get the packet length */
3698 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3699 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
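		/* frames are padded so that size + RXW_PADDING is a
		 * multiple of 4, keeping the next frame in the URB aligned
		 */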
3700 
3701 		if (unlikely(size > skb->len)) {
3702 			netif_dbg(dev, rx_err, dev->net,
3703 				  "size err rx_cmd_a=0x%08x\n",
3704 				  rx_cmd_a);
3705 			return 0;
3706 		}
3707 
3708 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3709 			netif_dbg(dev, rx_err, dev->net,
3710 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3711 		} else {
3712 			u32 frame_len;
3713 			struct sk_buff *skb2;
3714 
3715 			if (unlikely(size < ETH_FCS_LEN)) {
3716 				netif_dbg(dev, rx_err, dev->net,
3717 					  "size err rx_cmd_a=0x%08x\n",
3718 					  rx_cmd_a);
3719 				return 0;
3720 			}
3721 
3722 			frame_len = size - ETH_FCS_LEN;
3723 
3724 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3725 			if (!skb2)
3726 				return 0;
3727 
3728 			memcpy(skb2->data, packet, frame_len);
3729 
3730 			skb_put(skb2, frame_len);
3731 
3732 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3733 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3734 
3735 			/* Processing of the URB buffer must complete once
3736 			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain, they are added to the overflow
3738 			 * queue for delivery in the next NAPI polling cycle.
3739 			 */
3740 			if (*work_done < budget) {
3741 				lan78xx_skb_return(dev, skb2);
3742 				++(*work_done);
3743 			} else {
3744 				skb_queue_tail(&dev->rxq_overflow, skb2);
3745 			}
3746 		}
3747 
3748 		skb_pull(skb, size);
3749 
3750 		/* skip padding bytes before the next frame starts */
3751 		if (skb->len)
3752 			skb_pull(skb, align_count);
3753 	}
3754 
3755 	return 1;
3756 }
3757 
3758 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3759 			      int budget, int *work_done)
3760 {
3761 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3762 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3763 		dev->net->stats.rx_errors++;
3764 	}
3765 }
3766 
3767 static void rx_complete(struct urb *urb)
3768 {
3769 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3770 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3771 	struct lan78xx_net *dev = entry->dev;
3772 	int urb_status = urb->status;
3773 	enum skb_state state;
3774 
3775 	netif_dbg(dev, rx_status, dev->net,
3776 		  "rx done: status %d", urb->status);
3777 
3778 	skb_put(skb, urb->actual_length);
3779 	state = rx_done;
3780 
3781 	if (urb != entry->urb)
3782 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
3783 
3784 	switch (urb_status) {
3785 	case 0:
3786 		if (skb->len < RX_SKB_MIN_LEN) {
3787 			state = rx_cleanup;
3788 			dev->net->stats.rx_errors++;
3789 			dev->net->stats.rx_length_errors++;
3790 			netif_dbg(dev, rx_err, dev->net,
3791 				  "rx length %d\n", skb->len);
3792 		}
3793 		usb_mark_last_busy(dev->udev);
3794 		break;
3795 	case -EPIPE:
3796 		dev->net->stats.rx_errors++;
3797 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3798 		fallthrough;
3799 	case -ECONNRESET:				/* async unlink */
3800 	case -ESHUTDOWN:				/* hardware gone */
3801 		netif_dbg(dev, ifdown, dev->net,
3802 			  "rx shutdown, code %d\n", urb_status);
3803 		state = rx_cleanup;
3804 		break;
3805 	case -EPROTO:
3806 	case -ETIME:
3807 	case -EILSEQ:
3808 		dev->net->stats.rx_errors++;
3809 		state = rx_cleanup;
3810 		break;
3811 
3812 	/* data overrun ... flush fifo? */
3813 	case -EOVERFLOW:
3814 		dev->net->stats.rx_over_errors++;
3815 		fallthrough;
3816 
3817 	default:
3818 		state = rx_cleanup;
3819 		dev->net->stats.rx_errors++;
3820 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3821 		break;
3822 	}
3823 
3824 	state = defer_bh(dev, skb, &dev->rxq, state);
3825 }
3826 
3827 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
3828 {
3829 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3830 	size_t size = dev->rx_urb_size;
3831 	struct urb *urb = entry->urb;
3832 	unsigned long lockflags;
3833 	int ret = 0;
3834 
3835 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3836 			  skb->data, size, rx_complete, skb);
3837 
3838 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3839 
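	/* submit only while the device is present and running, and Rx is
	 * neither halted nor suspended
	 */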
3840 	if (netif_device_present(dev->net) &&
3841 	    netif_running(dev->net) &&
3842 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3843 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3844 		ret = usb_submit_urb(urb, flags);
3845 		switch (ret) {
3846 		case 0:
3847 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3848 			break;
3849 		case -EPIPE:
3850 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3851 			break;
3852 		case -ENODEV:
3853 		case -ENOENT:
3854 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3855 			netif_device_detach(dev->net);
3856 			break;
3857 		case -EHOSTUNREACH:
3858 			ret = -ENOLINK;
3859 			napi_schedule(&dev->napi);
3860 			break;
3861 		default:
3862 			netif_dbg(dev, rx_err, dev->net,
3863 				  "rx submit, %d\n", ret);
3864 			napi_schedule(&dev->napi);
3865 			break;
3866 		}
3867 	} else {
3868 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3869 		ret = -ENOLINK;
3870 	}
3871 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3872 
3873 	if (ret)
3874 		lan78xx_release_rx_buf(dev, skb);
3875 
3876 	return ret;
3877 }
3878 
3879 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3880 {
3881 	struct sk_buff *rx_buf;
3882 
3883 	/* Ensure the maximum number of Rx URBs is submitted
3884 	 */
3885 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3886 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3887 			break;
3888 	}
3889 }
3890 
3891 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
3892 				    struct sk_buff *rx_buf)
3893 {
3894 	/* reset SKB data pointers */
3895 
3896 	rx_buf->data = rx_buf->head;
3897 	skb_reset_tail_pointer(rx_buf);
3898 	rx_buf->len = 0;
3899 	rx_buf->data_len = 0;
3900 
3901 	rx_submit(dev, rx_buf, GFP_ATOMIC);
3902 }
3903 
3904 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
3905 {
3906 	u32 tx_cmd_a;
3907 	u32 tx_cmd_b;
3908 
3909 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3910 
3911 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3912 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3913 
3914 	tx_cmd_b = 0;
3915 	if (skb_is_gso(skb)) {
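		/* enforce the minimum MSS the hardware supports */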
3916 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3917 
3918 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3919 
3920 		tx_cmd_a |= TX_CMD_A_LSO_;
3921 	}
3922 
3923 	if (skb_vlan_tag_present(skb)) {
3924 		tx_cmd_a |= TX_CMD_A_IVTG_;
3925 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3926 	}
3927 
3928 	put_unaligned_le32(tx_cmd_a, buffer);
3929 	put_unaligned_le32(tx_cmd_b, buffer + 4);
3930 }
3931 
3932 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
3933 					    struct sk_buff *tx_buf)
3934 {
3935 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
3936 	int remain = dev->tx_urb_size;
3937 	u8 *tx_data = tx_buf->data;
3938 	u32 urb_len = 0;
3939 
3940 	entry->num_of_packet = 0;
3941 	entry->length = 0;
3942 
3943 	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there is room for all the SKB data.
3945 	 *
3946 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
3947 	 */
3948 	while (remain >= TX_SKB_MIN_LEN) {
3949 		unsigned int pending_bytes;
3950 		unsigned int align_bytes;
3951 		struct sk_buff *skb;
3952 		unsigned int len;
3953 
3954 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
3955 
3956 		if (!skb)
3957 			break;
3958 
3959 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
3960 			      TX_ALIGNMENT;
3961 		len = align_bytes + TX_CMD_LEN + skb->len;
3962 		if (len > remain) {
3963 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
3964 			break;
3965 		}
3966 
3967 		tx_data += align_bytes;
3968 
3969 		lan78xx_fill_tx_cmd_words(skb, tx_data);
3970 		tx_data += TX_CMD_LEN;
3971 
3972 		len = skb->len;
3973 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
3974 			struct net_device_stats *stats = &dev->net->stats;
3975 
3976 			stats->tx_dropped++;
3977 			dev_kfree_skb_any(skb);
3978 			tx_data -= TX_CMD_LEN;
3979 			continue;
3980 		}
3981 
3982 		tx_data += len;
3983 		entry->length += len;
3984 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
3985 
3986 		dev_kfree_skb_any(skb);
3987 
3988 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
3989 
3990 		remain = dev->tx_urb_size - urb_len;
3991 	}
3992 
3993 	skb_put(tx_buf, urb_len);
3994 
3995 	return entry;
3996 }
3997 
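/* Tx bottom half, run from NAPI poll: wake the stack queue once URB
 * space is available again, then drain the pending queue into bulk-out
 * URBs until data or URBs run out, or a submission fails.
 */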
3998 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3999 {
4000 	int ret;
4001 
4002 	/* Start the stack Tx queue if it was stopped */
4004 	netif_tx_lock(dev->net);
4005 	if (netif_queue_stopped(dev->net)) {
4006 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
4007 			netif_wake_queue(dev->net);
4008 	}
4009 	netif_tx_unlock(dev->net);
4010 
4011 	/* Go through the Tx pending queue and set up URBs to transfer
4012 	 * the data to the device. Stop if no more pending data or URBs,
4013 	 * or if an error occurs when a URB is submitted.
4014 	 */
4015 	do {
4016 		struct skb_data *entry;
4017 		struct sk_buff *tx_buf;
4018 		unsigned long flags;
4019 
4020 		if (skb_queue_empty(&dev->txq_pend))
4021 			break;
4022 
4023 		tx_buf = lan78xx_get_tx_buf(dev);
4024 		if (!tx_buf)
4025 			break;
4026 
4027 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
4028 
4029 		spin_lock_irqsave(&dev->txq.lock, flags);
4030 		ret = usb_autopm_get_interface_async(dev->intf);
4031 		if (ret < 0) {
4032 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4033 			goto out;
4034 		}
4035 
4036 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
4037 				  tx_buf->data, tx_buf->len, tx_complete,
4038 				  tx_buf);
4039 
4040 		if (tx_buf->len % dev->maxpacket == 0) {
4041 			/* end the transfer with an explicit zero-length packet */
4042 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
4043 		}
4044 
4045 #ifdef CONFIG_PM
4046 		/* if device is asleep stop outgoing packet processing */
4047 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4048 			usb_anchor_urb(entry->urb, &dev->deferred);
4049 			netif_stop_queue(dev->net);
4050 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4051 			netdev_dbg(dev->net,
4052 				   "Delaying transmission for resumption\n");
4053 			return;
4054 		}
4055 #endif
4056 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
4057 		switch (ret) {
4058 		case 0:
4059 			netif_trans_update(dev->net);
4060 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
4061 			break;
4062 		case -EPIPE:
4063 			netif_stop_queue(dev->net);
4064 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4065 			usb_autopm_put_interface_async(dev->intf);
4066 			break;
4067 		case -ENODEV:
4068 		case -ENOENT:
4069 			netif_dbg(dev, tx_err, dev->net,
4070 				  "tx submit urb err %d (disconnected?)\n", ret);
4071 			netif_device_detach(dev->net);
4072 			break;
4073 		default:
4074 			usb_autopm_put_interface_async(dev->intf);
4075 			netif_dbg(dev, tx_err, dev->net,
4076 				  "tx submit urb err %d\n", ret);
4077 			break;
4078 		}
4079 
4080 		spin_unlock_irqrestore(&dev->txq.lock, flags);
4081 
4082 		if (ret) {
4083 			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
4084 out:
4085 			dev->net->stats.tx_dropped += entry->num_of_packet;
4086 			lan78xx_release_tx_buf(dev, tx_buf);
4087 		}
4088 	} while (ret == 0);
4089 }
4090 
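/* NAPI bottom half: deliver frames deferred from the previous cycle,
 * process and resubmit completed Rx URBs within the budget, then
 * replenish the free Rx URBs and kick the Tx path.
 */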
4091 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
4092 {
4093 	struct sk_buff_head done;
4094 	struct sk_buff *rx_buf;
4095 	struct skb_data *entry;
4096 	unsigned long flags;
4097 	int work_done = 0;
4098 
4099 	/* Pass frames received in the last NAPI cycle before
4100 	 * working on newly completed URBs.
4101 	 */
4102 	while (!skb_queue_empty(&dev->rxq_overflow)) {
4103 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
4104 		++work_done;
4105 	}
4106 
4107 	/* Take a snapshot of the done queue and move items to a
4108 	 * temporary queue. Rx URB completions will continue to add
4109 	 * to the done queue.
4110 	 */
4111 	__skb_queue_head_init(&done);
4112 
4113 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4114 	skb_queue_splice_init(&dev->rxq_done, &done);
4115 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4116 
4117 	/* Extract receive frames from completed URBs and
4118 	 * pass them to the stack. Re-submit each completed URB.
4119 	 */
4120 	while ((work_done < budget) &&
4121 	       (rx_buf = __skb_dequeue(&done))) {
4122 		entry = (struct skb_data *)(rx_buf->cb);
4123 		switch (entry->state) {
4124 		case rx_done:
4125 			rx_process(dev, rx_buf, budget, &work_done);
4126 			break;
4127 		case rx_cleanup:
4128 			break;
4129 		default:
4130 			netdev_dbg(dev->net, "rx buf state %d\n",
4131 				   entry->state);
4132 			break;
4133 		}
4134 
4135 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4136 	}
4137 
4138 	/* If budget was consumed before processing all the URBs put them
4139 	 * back on the front of the done queue. They will be first to be
4140 	 * processed in the next NAPI cycle.
4141 	 */
4142 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4143 	skb_queue_splice(&done, &dev->rxq_done);
4144 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4145 
4146 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4147 		/* reset update timer delta */
4148 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4149 			dev->delta = 1;
4150 			mod_timer(&dev->stat_monitor,
4151 				  jiffies + STAT_UPDATE_TIMER);
4152 		}
4153 
4154 		/* Submit all free Rx URBs */
4155 
4156 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4157 			lan78xx_rx_urb_submit_all(dev);
4158 
4159 		/* Submit new Tx URBs */
4160 
4161 		lan78xx_tx_bh(dev);
4162 	}
4163 
4164 	return work_done;
4165 }
4166 
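/* NAPI poll callback. Per the NAPI contract, the poll is completed
 * only when less than the full budget was consumed; a new cycle is
 * scheduled if completed Rx URBs or pending Tx data remain.
 */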
4167 static int lan78xx_poll(struct napi_struct *napi, int budget)
4168 {
4169 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4170 	int result = budget;
4171 	int work_done;
4172 
4173 	/* Don't do any work if the device is suspended */
4174 
4175 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4176 		napi_complete_done(napi, 0);
4177 		return 0;
4178 	}
4179 
4180 	/* Process completed URBs and submit new URBs */
4181 
4182 	work_done = lan78xx_bh(dev, budget);
4183 
4184 	if (work_done < budget) {
4185 		napi_complete_done(napi, work_done);
4186 
4187 		/* Start a new polling cycle if data was received or
4188 		 * data is waiting to be transmitted.
4189 		 */
4190 		if (!skb_queue_empty(&dev->rxq_done)) {
4191 			napi_schedule(napi);
4192 		} else if (netif_carrier_ok(dev->net)) {
4193 			if (skb_queue_empty(&dev->txq) &&
4194 			    !skb_queue_empty(&dev->txq_pend)) {
4195 				napi_schedule(napi);
4196 			} else {
4197 				netif_tx_lock(dev->net);
4198 				if (netif_queue_stopped(dev->net)) {
4199 					netif_wake_queue(dev->net);
4200 					napi_schedule(napi);
4201 				}
4202 				netif_tx_unlock(dev->net);
4203 			}
4204 		}
4205 		result = work_done;
4206 	}
4207 
4208 	return result;
4209 }
4210 
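/* Deferred-event worker: recovers halted bulk endpoints, runs deferred
 * link resets and periodic statistics updates. Events are signalled as
 * bits in dev->flags via lan78xx_defer_kevent().
 */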
4211 static void lan78xx_delayedwork(struct work_struct *work)
4212 {
4213 	int status;
4214 	struct lan78xx_net *dev;
4215 
4216 	dev = container_of(work, struct lan78xx_net, wq.work);
4217 
4218 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4219 		return;
4220 
4221 	if (usb_autopm_get_interface(dev->intf) < 0)
4222 		return;
4223 
4224 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4225 		unlink_urbs(dev, &dev->txq);
4226 
4227 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4228 		if (status < 0 &&
4229 		    status != -EPIPE &&
4230 		    status != -ESHUTDOWN) {
4231 			if (netif_msg_tx_err(dev))
4232 				netdev_err(dev->net,
4233 					   "can't clear tx halt, status %d\n",
4234 					   status);
4235 		} else {
4236 			clear_bit(EVENT_TX_HALT, &dev->flags);
4237 			if (status != -ESHUTDOWN)
4238 				netif_wake_queue(dev->net);
4239 		}
4240 	}
4241 
4242 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4243 		unlink_urbs(dev, &dev->rxq);
4244 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4245 		if (status < 0 &&
4246 		    status != -EPIPE &&
4247 		    status != -ESHUTDOWN) {
4248 			if (netif_msg_rx_err(dev))
4249 				netdev_err(dev->net,
4250 					   "can't clear rx halt, status %d\n",
4251 					   status);
4252 		} else {
4253 			clear_bit(EVENT_RX_HALT, &dev->flags);
4254 			napi_schedule(&dev->napi);
4255 		}
4256 	}
4257 
4258 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4259 		int ret;
4260 
4261 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4262 		ret = lan78xx_link_reset(dev);
4263 		if (ret < 0)
4264 			netdev_info(dev->net, "link reset failed (%d)\n",
4265 				    ret);
4266 	}
4267 
4268 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4269 		lan78xx_update_stats(dev);
4270 
4271 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4272 
4273 		mod_timer(&dev->stat_monitor,
4274 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4275 
4276 		dev->delta = min((dev->delta * 2), 50);
4277 	}
4278 
4279 	usb_autopm_put_interface(dev->intf);
4280 }
4281 
4282 static void intr_complete(struct urb *urb)
4283 {
4284 	struct lan78xx_net *dev = urb->context;
4285 	int status = urb->status;
4286 
4287 	switch (status) {
4288 	/* success */
4289 	case 0:
4290 		lan78xx_status(dev, urb);
4291 		break;
4292 
4293 	/* software-driven interface shutdown */
4294 	case -ENOENT:			/* urb killed */
4295 	case -ENODEV:			/* hardware gone */
4296 	case -ESHUTDOWN:		/* hardware gone */
4297 		netif_dbg(dev, ifdown, dev->net,
4298 			  "intr shutdown, code %d\n", status);
4299 		return;
4300 
4301 	/* NOTE:  not throttling like RX/TX, since this endpoint
4302 	 * already polls infrequently
4303 	 */
4304 	default:
4305 		netdev_dbg(dev->net, "intr status %d\n", status);
4306 		break;
4307 	}
4308 
4309 	if (!netif_device_present(dev->net) ||
4310 	    !netif_running(dev->net)) {
4311 		netdev_warn(dev->net, "not submitting new status URB\n");
4312 		return;
4313 	}
4314 
4315 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4316 	status = usb_submit_urb(urb, GFP_ATOMIC);
4317 
4318 	switch (status) {
4319 	case  0:
4320 		break;
4321 	case -ENODEV:
4322 	case -ENOENT:
4323 		netif_dbg(dev, timer, dev->net,
4324 			  "intr resubmit %d (disconnect?)\n", status);
4325 		netif_device_detach(dev->net);
4326 		break;
4327 	default:
4328 		netif_err(dev, timer, dev->net,
4329 			  "intr resubmit --> %d\n", status);
4330 		break;
4331 	}
4332 }
4333 
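/* Disconnect teardown: unregister the netdev first so no new I/O can
 * start, then stop the stats timer and deferred work, detach the PHY,
 * and finally release the URBs and buffer pools.
 */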
4334 static void lan78xx_disconnect(struct usb_interface *intf)
4335 {
4336 	struct lan78xx_net *dev;
4337 	struct usb_device *udev;
4338 	struct net_device *net;
4339 	struct phy_device *phydev;
4340 
4341 	dev = usb_get_intfdata(intf);
4342 	usb_set_intfdata(intf, NULL);
4343 	if (!dev)
4344 		return;
4345 
4346 	netif_napi_del(&dev->napi);
4347 
4348 	udev = interface_to_usbdev(intf);
4349 	net = dev->net;
4350 
4351 	unregister_netdev(net);
4352 
4353 	timer_shutdown_sync(&dev->stat_monitor);
4354 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4355 	cancel_delayed_work_sync(&dev->wq);
4356 
4357 	phydev = net->phydev;
4358 
4359 	phy_disconnect(net->phydev);
4360 
4361 	if (phy_is_pseudo_fixed_link(phydev)) {
4362 		fixed_phy_unregister(phydev);
4363 		phy_device_free(phydev);
4364 	}
4365 
4366 	usb_scuttle_anchored_urbs(&dev->deferred);
4367 
4368 	lan78xx_unbind(dev, intf);
4369 
4370 	lan78xx_free_tx_resources(dev);
4371 	lan78xx_free_rx_resources(dev);
4372 
4373 	usb_kill_urb(dev->urb_intr);
4374 	usb_free_urb(dev->urb_intr);
4375 
4376 	free_netdev(net);
4377 	usb_put_dev(udev);
4378 }
4379 
4380 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4381 {
4382 	struct lan78xx_net *dev = netdev_priv(net);
4383 
4384 	unlink_urbs(dev, &dev->txq);
4385 	napi_schedule(&dev->napi);
4386 }
4387 
4388 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4389 						struct net_device *netdev,
4390 						netdev_features_t features)
4391 {
4392 	struct lan78xx_net *dev = netdev_priv(netdev);
4393 
4394 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4395 		features &= ~NETIF_F_GSO_MASK;
4396 
4397 	features = vlan_features_check(skb, features);
4398 	features = vxlan_features_check(skb, features);
4399 
4400 	return features;
4401 }
4402 
4403 static const struct net_device_ops lan78xx_netdev_ops = {
4404 	.ndo_open		= lan78xx_open,
4405 	.ndo_stop		= lan78xx_stop,
4406 	.ndo_start_xmit		= lan78xx_start_xmit,
4407 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4408 	.ndo_change_mtu		= lan78xx_change_mtu,
4409 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4410 	.ndo_validate_addr	= eth_validate_addr,
4411 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4412 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4413 	.ndo_set_features	= lan78xx_set_features,
4414 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4415 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4416 	.ndo_features_check	= lan78xx_features_check,
4417 };
4418 
4419 static void lan78xx_stat_monitor(struct timer_list *t)
4420 {
4421 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4422 
4423 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4424 }
4425 
4426 static int lan78xx_probe(struct usb_interface *intf,
4427 			 const struct usb_device_id *id)
4428 {
4429 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4430 	struct lan78xx_net *dev;
4431 	struct net_device *netdev;
4432 	struct usb_device *udev;
4433 	int ret;
4434 	unsigned int maxp;
4435 	unsigned int period;
4436 	u8 *buf = NULL;
4437 
4438 	udev = interface_to_usbdev(intf);
4439 	udev = usb_get_dev(udev);
4440 
4441 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4442 	if (!netdev) {
4443 		dev_err(&intf->dev, "Error: OOM\n");
4444 		ret = -ENOMEM;
4445 		goto out1;
4446 	}
4447 
4448 	/* netdev_printk() needs this */
4449 	SET_NETDEV_DEV(netdev, &intf->dev);
4450 
4451 	dev = netdev_priv(netdev);
4452 	dev->udev = udev;
4453 	dev->intf = intf;
4454 	dev->net = netdev;
4455 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV |
4456 					 NETIF_MSG_PROBE | NETIF_MSG_LINK);
4457 
4458 	skb_queue_head_init(&dev->rxq);
4459 	skb_queue_head_init(&dev->txq);
4460 	skb_queue_head_init(&dev->rxq_done);
4461 	skb_queue_head_init(&dev->txq_pend);
4462 	skb_queue_head_init(&dev->rxq_overflow);
4463 	mutex_init(&dev->mdiobus_mutex);
4464 	mutex_init(&dev->dev_mutex);
4465 
4466 	ret = lan78xx_urb_config_init(dev);
4467 	if (ret < 0)
4468 		goto out2;
4469 
4470 	ret = lan78xx_alloc_tx_resources(dev);
4471 	if (ret < 0)
4472 		goto out2;
4473 
4474 	ret = lan78xx_alloc_rx_resources(dev);
4475 	if (ret < 0)
4476 		goto out3;
4477 
4478 	/* MTU range: 68 - 9000 */
4479 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4480 
4481 	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4482 
4483 	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4484 
4485 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4486 	init_usb_anchor(&dev->deferred);
4487 
4488 	netdev->netdev_ops = &lan78xx_netdev_ops;
4489 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4490 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4491 
4492 	dev->delta = 1;
4493 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4494 
4495 	mutex_init(&dev->stats.access_lock);
4496 
4497 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4498 		ret = -ENODEV;
4499 		goto out4;
4500 	}
4501 
4502 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4503 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4504 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4505 		ret = -ENODEV;
4506 		goto out4;
4507 	}
4508 
4509 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4510 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4511 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4512 		ret = -ENODEV;
4513 		goto out4;
4514 	}
4515 
4516 	ep_intr = &intf->cur_altsetting->endpoint[2];
4517 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4518 		ret = -ENODEV;
4519 		goto out4;
4520 	}
4521 
4522 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4523 					usb_endpoint_num(&ep_intr->desc));
4524 
4525 	ret = lan78xx_bind(dev, intf);
4526 	if (ret < 0)
4527 		goto out4;
4528 
4529 	period = ep_intr->desc.bInterval;
4530 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4531 
4532 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4533 	if (!dev->urb_intr) {
4534 		ret = -ENOMEM;
4535 		goto out5;
4536 	}
4537 
4538 	buf = kmalloc(maxp, GFP_KERNEL);
4539 	if (!buf) {
4540 		ret = -ENOMEM;
4541 		goto free_urbs;
4542 	}
4543 
4544 	usb_fill_int_urb(dev->urb_intr, dev->udev,
4545 			 dev->pipe_intr, buf, maxp,
4546 			 intr_complete, dev, period);
4547 	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4548 
4549 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4550 
4551 	/* Reject broken descriptors. */
4552 	if (dev->maxpacket == 0) {
4553 		ret = -ENODEV;
4554 		goto free_urbs;
4555 	}
4556 
4557 	/* driver requires remote-wakeup capability during autosuspend. */
4558 	intf->needs_remote_wakeup = 1;
4559 
4560 	ret = lan78xx_phy_init(dev);
4561 	if (ret < 0)
4562 		goto free_urbs;
4563 
4564 	ret = register_netdev(netdev);
4565 	if (ret != 0) {
4566 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4567 		goto out8;
4568 	}
4569 
4570 	usb_set_intfdata(intf, dev);
4571 
4572 	ret = device_set_wakeup_enable(&udev->dev, true);
4573 
4574 	/* The default autosuspend delay of 2 sec incurs more overhead
4575 	 * than benefit; set it to 10 sec by default.
4576 	 */
4577 	pm_runtime_set_autosuspend_delay(&udev->dev,
4578 					 DEFAULT_AUTOSUSPEND_DELAY);
4579 
4580 	return 0;
4581 
4582 out8:
4583 	phy_disconnect(netdev->phydev);
4584 free_urbs:
4585 	usb_free_urb(dev->urb_intr);
4586 out5:
4587 	lan78xx_unbind(dev, intf);
4588 out4:
4589 	netif_napi_del(&dev->napi);
4590 	lan78xx_free_rx_resources(dev);
4591 out3:
4592 	lan78xx_free_tx_resources(dev);
4593 out2:
4594 	free_netdev(netdev);
4595 out1:
4596 	usb_put_dev(udev);
4597 
4598 	return ret;
4599 }
4600 
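/* Bitwise CRC-16 (polynomial 0x8005, data consumed LSB first) over a
 * wakeup-frame byte pattern; presumably this matches the CRC the MAC
 * computes when matching WUF_CFG filters, including the quirk of
 * forcing bit 0 after each polynomial reduction.
 */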
4601 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4602 {
4603 	const u16 crc16poly = 0x8005;
4604 	int i;
4605 	u16 bit, crc, msb;
4606 	u8 data;
4607 
4608 	crc = 0xFFFF;
4609 	for (i = 0; i < len; i++) {
4610 		data = *buf++;
4611 		for (bit = 0; bit < 8; bit++) {
4612 			msb = crc >> 15;
4613 			crc <<= 1;
4614 
4615 			if (msb ^ (u16)(data & 1)) {
4616 				crc ^= crc16poly;
4617 				crc |= (u16)0x0001U;
4618 			}
4619 			data >>= 1;
4620 		}
4621 	}
4622 
4623 	return crc;
4624 }
4625 
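/* Configure the MAC for USB selective (auto) suspend: quiesce both
 * data paths, clear stale wakeup status, arm good-frame and PHY wakeup
 * in suspend mode 3, and re-enable the Rx path so wake events can be
 * detected.
 */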
4626 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4627 {
4628 	u32 buf;
4629 	int ret;
4630 
4631 	ret = lan78xx_stop_tx_path(dev);
4632 	if (ret < 0)
4633 		return ret;
4634 
4635 	ret = lan78xx_stop_rx_path(dev);
4636 	if (ret < 0)
4637 		return ret;
4638 
4639 	/* auto suspend (selective suspend) */
4640 
4641 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4642 	if (ret < 0)
4643 		return ret;
4644 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4645 	if (ret < 0)
4646 		return ret;
4647 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4648 	if (ret < 0)
4649 		return ret;
4650 
4651 	/* set goodframe wakeup */
4652 
4653 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4654 	if (ret < 0)
4655 		return ret;
4656 
4657 	buf |= WUCSR_RFE_WAKE_EN_;
4658 	buf |= WUCSR_STORE_WAKE_;
4659 
4660 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4661 	if (ret < 0)
4662 		return ret;
4663 
4664 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4665 	if (ret < 0)
4666 		return ret;
4667 
4668 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4669 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4670 	buf |= PMT_CTL_PHY_WAKE_EN_;
4671 	buf |= PMT_CTL_WOL_EN_;
4672 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4673 	buf |= PMT_CTL_SUS_MODE_3_;
4674 
4675 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4676 	if (ret < 0)
4677 		return ret;
4678 
4679 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4680 	if (ret < 0)
4681 		return ret;
4682 
4683 	buf |= PMT_CTL_WUPS_MASK_;
4684 
4685 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4686 	if (ret < 0)
4687 		return ret;
4688 
4689 	ret = lan78xx_start_rx_path(dev);
4690 
4691 	return ret;
4692 }
4693 
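/* Arm wakeup sources for suspend from the WAKE_* flags: pattern-based
 * sources (multicast, ARP) each get a WUF_CFG filter consisting of a
 * byte mask and a CRC-16 of the selected bytes; the suspend mode is
 * chosen to match the armed sources.
 */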
4694 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4695 {
4696 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4697 	const u8 ipv6_multicast[2] = { 0x33, 0x33 };
4698 	const u8 arp_type[2] = { 0x08, 0x06 };
4699 	u32 temp_pmt_ctl;
4700 	int mask_index;
4701 	u32 temp_wucsr;
4702 	u32 buf;
4703 	u16 crc;
4704 	int ret;
4705 
4706 	ret = lan78xx_stop_tx_path(dev);
4707 	if (ret < 0)
4708 		return ret;
4709 	ret = lan78xx_stop_rx_path(dev);
4710 	if (ret < 0)
4711 		return ret;
4712 
4713 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4714 	if (ret < 0)
4715 		return ret;
4716 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4717 	if (ret < 0)
4718 		return ret;
4719 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4720 	if (ret < 0)
4721 		return ret;
4722 
4723 	temp_wucsr = 0;
4724 
4725 	temp_pmt_ctl = 0;
4726 
4727 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4728 	if (ret < 0)
4729 		return ret;
4730 
4731 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4732 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4733 
4734 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4735 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4736 		if (ret < 0)
4737 			return ret;
4738 	}
4739 
4740 	mask_index = 0;
4741 	if (wol & WAKE_PHY) {
4742 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4743 
4744 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4745 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4746 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4747 	}
4748 	if (wol & WAKE_MAGIC) {
4749 		temp_wucsr |= WUCSR_MPEN_;
4750 
4751 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4752 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4753 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4754 	}
4755 	if (wol & WAKE_BCAST) {
4756 		temp_wucsr |= WUCSR_BCST_EN_;
4757 
4758 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4759 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4760 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4761 	}
4762 	if (wol & WAKE_MCAST) {
4763 		temp_wucsr |= WUCSR_WAKE_EN_;
4764 
4765 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4766 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4767 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4768 					WUF_CFGX_EN_ |
4769 					WUF_CFGX_TYPE_MCAST_ |
4770 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4771 					(crc & WUF_CFGX_CRC16_MASK_));
4772 		if (ret < 0)
4773 			return ret;
4774 
4775 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4776 		if (ret < 0)
4777 			return ret;
4778 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4779 		if (ret < 0)
4780 			return ret;
4781 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4782 		if (ret < 0)
4783 			return ret;
4784 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4785 		if (ret < 0)
4786 			return ret;
4787 
4788 		mask_index++;
4789 
4790 		/* for IPv6 Multicast */
4791 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4792 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4793 					WUF_CFGX_EN_ |
4794 					WUF_CFGX_TYPE_MCAST_ |
4795 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4796 					(crc & WUF_CFGX_CRC16_MASK_));
4797 		if (ret < 0)
4798 			return ret;
4799 
4800 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4801 		if (ret < 0)
4802 			return ret;
4803 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4804 		if (ret < 0)
4805 			return ret;
4806 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4807 		if (ret < 0)
4808 			return ret;
4809 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4810 		if (ret < 0)
4811 			return ret;
4812 
4813 		mask_index++;
4814 
4815 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4816 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4817 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4818 	}
4819 	if (wol & WAKE_UCAST) {
4820 		temp_wucsr |= WUCSR_PFDA_EN_;
4821 
4822 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4823 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4824 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4825 	}
4826 	if (wol & WAKE_ARP) {
4827 		temp_wucsr |= WUCSR_WAKE_EN_;
4828 
4829 		/* set WUF_CFG & WUF_MASK
4830 		 * for packettype (offset 12,13) = ARP (0x0806)
4831 		 */
4832 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
4833 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4834 					WUF_CFGX_EN_ |
4835 					WUF_CFGX_TYPE_ALL_ |
4836 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4837 					(crc & WUF_CFGX_CRC16_MASK_));
4838 		if (ret < 0)
4839 			return ret;
4840 
4841 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4842 		if (ret < 0)
4843 			return ret;
4844 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4845 		if (ret < 0)
4846 			return ret;
4847 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4848 		if (ret < 0)
4849 			return ret;
4850 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4851 		if (ret < 0)
4852 			return ret;
4853 
4854 		mask_index++;
4855 
4856 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4857 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4858 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4859 	}
4860 
4861 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4862 	if (ret < 0)
4863 		return ret;
4864 
4865 	/* when multiple WOL bits are set */
4866 	if (hweight_long((unsigned long)wol) > 1) {
4867 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4868 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4869 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4870 	}
4871 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4872 	if (ret < 0)
4873 		return ret;
4874 
4875 	/* clear WUPS */
4876 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4877 	if (ret < 0)
4878 		return ret;
4879 
4880 	buf |= PMT_CTL_WUPS_MASK_;
4881 
4882 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4883 	if (ret < 0)
4884 		return ret;
4885 
4886 	ret = lan78xx_start_rx_path(dev);
4887 
4888 	return ret;
4889 }
4890 
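/* USB suspend handler for both runtime (auto) and system suspend,
 * distinguished with PMSG_IS_AUTO(). Autosuspend is refused with
 * -EBUSY while Tx traffic is still in flight.
 */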
4891 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4892 {
4893 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4894 	bool dev_open;
4895 	int ret;
4896 
4897 	mutex_lock(&dev->dev_mutex);
4898 
4899 	netif_dbg(dev, ifdown, dev->net,
4900 		  "suspending: pm event %#x", message.event);
4901 
4902 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4903 
4904 	if (dev_open) {
4905 		spin_lock_irq(&dev->txq.lock);
4906 		/* don't autosuspend while transmitting */
4907 		if ((skb_queue_len(&dev->txq) ||
4908 		     skb_queue_len(&dev->txq_pend)) &&
4909 		    PMSG_IS_AUTO(message)) {
4910 			spin_unlock_irq(&dev->txq.lock);
4911 			ret = -EBUSY;
4912 			goto out;
4913 		} else {
4914 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4915 			spin_unlock_irq(&dev->txq.lock);
4916 		}
4917 
4918 		/* stop RX */
4919 		ret = lan78xx_stop_rx_path(dev);
4920 		if (ret < 0)
4921 			goto out;
4922 
4923 		ret = lan78xx_flush_rx_fifo(dev);
4924 		if (ret < 0)
4925 			goto out;
4926 
4927 		/* stop Tx */
4928 		ret = lan78xx_stop_tx_path(dev);
4929 		if (ret < 0)
4930 			goto out;
4931 
4932 		/* empty out the Rx and Tx queues */
4933 		netif_device_detach(dev->net);
4934 		lan78xx_terminate_urbs(dev);
4935 		usb_kill_urb(dev->urb_intr);
4936 
4937 		/* reattach */
4938 		netif_device_attach(dev->net);
4939 
4940 		timer_delete(&dev->stat_monitor);
4941 
4942 		if (PMSG_IS_AUTO(message)) {
4943 			ret = lan78xx_set_auto_suspend(dev);
4944 			if (ret < 0)
4945 				goto out;
4946 		} else {
4947 			struct lan78xx_priv *pdata;
4948 
4949 			pdata = (struct lan78xx_priv *)(dev->data[0]);
4950 			netif_carrier_off(dev->net);
4951 			ret = lan78xx_set_suspend(dev, pdata->wol);
4952 			if (ret < 0)
4953 				goto out;
4954 		}
4955 	} else {
4956 		/* Interface is down; don't allow WOL and PHY
4957 		 * events to wake up the host
4958 		 */
4959 		u32 buf;
4960 
4961 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4962 
4963 		ret = lan78xx_write_reg(dev, WUCSR, 0);
4964 		if (ret < 0)
4965 			goto out;
4966 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
4967 		if (ret < 0)
4968 			goto out;
4969 
4970 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4971 		if (ret < 0)
4972 			goto out;
4973 
4974 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4975 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
4976 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
4977 		buf |= PMT_CTL_SUS_MODE_3_;
4978 
4979 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4980 		if (ret < 0)
4981 			goto out;
4982 
4983 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4984 		if (ret < 0)
4985 			goto out;
4986 
4987 		buf |= PMT_CTL_WUPS_MASK_;
4988 
4989 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4990 		if (ret < 0)
4991 			goto out;
4992 	}
4993 
4994 	ret = 0;
4995 out:
4996 	mutex_unlock(&dev->dev_mutex);
4997 
4998 	return ret;
4999 }
5000 
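/* Resubmit Tx URBs that were anchored on dev->deferred while the
 * device was asleep. Called with dev->txq.lock held; returns true if
 * the bulk-out pipe stalled (-EPIPE) during resubmission.
 */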
5001 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
5002 {
5003 	bool pipe_halted = false;
5004 	struct urb *urb;
5005 
5006 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5007 		struct sk_buff *skb = urb->context;
5008 		int ret;
5009 
5010 		if (!netif_device_present(dev->net) ||
5011 		    !netif_carrier_ok(dev->net) ||
5012 		    pipe_halted) {
5013 			lan78xx_release_tx_buf(dev, skb);
5014 			continue;
5015 		}
5016 
5017 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5018 
5019 		if (ret == 0) {
5020 			netif_trans_update(dev->net);
5021 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5022 		} else {
5023 			if (ret == -EPIPE) {
5024 				netif_stop_queue(dev->net);
5025 				pipe_halted = true;
5026 			} else if (ret == -ENODEV) {
5027 				netif_device_detach(dev->net);
5028 			}
5029 
5030 			lan78xx_release_tx_buf(dev, skb);
5031 		}
5032 	}
5033 
5034 	return pipe_halted;
5035 }
5036 
5037 static int lan78xx_resume(struct usb_interface *intf)
5038 {
5039 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5040 	bool dev_open;
5041 	int ret;
5042 
5043 	mutex_lock(&dev->dev_mutex);
5044 
5045 	netif_dbg(dev, ifup, dev->net, "resuming device");
5046 
5047 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5048 
5049 	if (dev_open) {
5050 		bool pipe_halted = false;
5051 
5052 		ret = lan78xx_flush_tx_fifo(dev);
5053 		if (ret < 0)
5054 			goto out;
5055 
5056 		if (dev->urb_intr) {
5057 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
5058 
5059 			if (ret < 0) {
5060 				if (ret == -ENODEV)
5061 					netif_device_detach(dev->net);
5062 				netdev_warn(dev->net, "Failed to submit intr URB");
5063 			}
5064 		}
5065 
5066 		spin_lock_irq(&dev->txq.lock);
5067 
5068 		if (netif_device_present(dev->net)) {
5069 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
5070 
5071 			if (pipe_halted)
5072 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5073 		}
5074 
5075 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5076 
5077 		spin_unlock_irq(&dev->txq.lock);
5078 
5079 		if (!pipe_halted &&
5080 		    netif_device_present(dev->net) &&
5081 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5082 			netif_start_queue(dev->net);
5083 
5084 		ret = lan78xx_start_tx_path(dev);
5085 		if (ret < 0)
5086 			goto out;
5087 
5088 		napi_schedule(&dev->napi);
5089 
5090 		if (!timer_pending(&dev->stat_monitor)) {
5091 			dev->delta = 1;
5092 			mod_timer(&dev->stat_monitor,
5093 				  jiffies + STAT_UPDATE_TIMER);
5094 		}
5095 
5096 	} else {
5097 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5098 	}
5099 
5100 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
5101 	if (ret < 0)
5102 		goto out;
5103 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5104 	if (ret < 0)
5105 		goto out;
5106 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5107 	if (ret < 0)
5108 		goto out;
5109 
5110 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5111 					     WUCSR2_ARP_RCD_ |
5112 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5113 					     WUCSR2_IPV4_TCPSYN_RCD_);
5114 	if (ret < 0)
5115 		goto out;
5116 
5117 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5118 					    WUCSR_EEE_RX_WAKE_ |
5119 					    WUCSR_PFDA_FR_ |
5120 					    WUCSR_RFE_WAKE_FR_ |
5121 					    WUCSR_WUFR_ |
5122 					    WUCSR_MPR_ |
5123 					    WUCSR_BCST_FR_);
5124 	if (ret < 0)
5125 		goto out;
5126 
5127 	ret = 0;
5128 out:
5129 	mutex_unlock(&dev->dev_mutex);
5130 
5131 	return ret;
5132 }
5133 
5134 static int lan78xx_reset_resume(struct usb_interface *intf)
5135 {
5136 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5137 	int ret;
5138 
5139 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5140 
5141 	ret = lan78xx_reset(dev);
5142 	if (ret < 0)
5143 		return ret;
5144 
5145 	phy_start(dev->net->phydev);
5146 
5147 	ret = lan78xx_resume(intf);
5148 
5149 	return ret;
5150 }
5151 
5152 static const struct usb_device_id products[] = {
5153 	{
5154 	/* LAN7800 USB Gigabit Ethernet Device */
5155 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5156 	},
5157 	{
5158 	/* LAN7850 USB Gigabit Ethernet Device */
5159 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5160 	},
5161 	{
5162 	/* LAN7801 USB Gigabit Ethernet Device */
5163 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5164 	},
5165 	{
5166 	/* ATM2-AF USB Gigabit Ethernet Device */
5167 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5168 	},
5169 	{},
5170 };
5171 MODULE_DEVICE_TABLE(usb, products);
5172 
5173 static struct usb_driver lan78xx_driver = {
5174 	.name			= DRIVER_NAME,
5175 	.id_table		= products,
5176 	.probe			= lan78xx_probe,
5177 	.disconnect		= lan78xx_disconnect,
5178 	.suspend		= lan78xx_suspend,
5179 	.resume			= lan78xx_resume,
5180 	.reset_resume		= lan78xx_reset_resume,
5181 	.supports_autosuspend	= 1,
5182 	.disable_hub_initiated_lpm = 1,
5183 };
5184 
5185 module_usb_driver(lan78xx_driver);
5186 
5187 MODULE_AUTHOR(DRIVER_AUTHOR);
5188 MODULE_DESCRIPTION(DRIVER_DESC);
5189 MODULE_LICENSE("GPL");
5190