// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phylink.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/selftests.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

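/* Flow control thresholds are encoded in units of 512 bytes (rounded up,
 * clamped to a 7-bit field): the "on" level occupies bits [6:0] and the
 * "off" level bits [14:8]. For example, FLOW_ON_SS (9216 bytes) encodes
 * as 18 units and FLOW_ON_HS (8704 bytes) as 17.
 */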
#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_ALIGNMENT			(4)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID		(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

#define TX_URB_NUM			10
#define TX_SS_URB_NUM			TX_URB_NUM
#define TX_HS_URB_NUM			TX_URB_NUM
#define TX_FS_URB_NUM			TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet. */
#define TX_SS_URB_SIZE			(32 * 1024)
#define TX_HS_URB_SIZE			(16 * 1024)
#define TX_FS_URB_SIZE			(10 * 1024)

#define RX_SS_URB_NUM			30
#define RX_HS_URB_NUM			10
#define RX_FS_URB_NUM			10
#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
#define RX_FS_URB_SIZE			TX_FS_URB_SIZE

#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY		0x2000
#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY		0x2000
#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY		0x2000

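/* Each transmitted frame is prefixed by an 8-byte command header and each
 * received frame carries a 10-byte metadata header, so the minimum valid
 * lengths below are the header plus an Ethernet header. LAN78XX_TSO_SIZE()
 * caps a TSO super-packet so that it still fits in a single TX URB.
 */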
#define TX_CMD_LEN			8
#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)

#define RX_CMD_LEN			10
#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Byte Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Byte Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

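/* Bit numbers in dev->flags. They are set with set_bit() (usually via
 * lan78xx_defer_kevent()) and serviced later from the delayed work handler.
 */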
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_PHY_INT_ACK		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		mdiobus_mutex; /* for MDIO bus access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;

	wait_queue_head_t	*wait;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;

	struct phylink		*phylink;
	struct phylink_config	phylink_config;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

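/* RX/TX buffer pool helpers. Buffers are preallocated skbs with a URB
 * stashed in skb->cb (struct skb_data); "get" pulls one from the pool and
 * "release" resets it and puts it back.
 */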
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	if (skb_queue_empty(buf_pool))
		return NULL;

	return skb_dequeue(buf_pool);
}

static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}

static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
{
	struct skb_data *entry;
	struct sk_buff *buf;

	while (!skb_queue_empty(buf_pool)) {
		buf = skb_dequeue(buf_pool);
		if (buf) {
			entry = (struct skb_data *)buf->cb;
			usb_free_urb(entry->urb);
			dev_kfree_skb_any(buf);
		}
	}
}

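/**
 * lan78xx_alloc_buf_pool - preallocate a pool of skb/URB pairs
 * @buf_pool: queue head to initialise and fill
 * @n_urbs: number of buffers to allocate
 * @urb_size: payload size of each buffer
 * @dev: owning device, recorded in each buffer's skb_data
 *
 * Returns 0 on success or -ENOMEM, in which case any partially built
 * pool is freed again.
 */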
static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}

static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}

static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}

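/**
 * lan78xx_read_reg - read a 32-bit device register over USB
 * @dev: device to read from
 * @index: register offset
 * @data: filled with the little-endian value converted to CPU order
 *
 * A heap buffer is used for the transfer because the USB core requires
 * DMA-able memory, not stack memory. Returns 0 on success or a negative
 * error code. lan78xx_write_reg() below is the write counterpart.
 */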
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

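/**
 * lan78xx_update_reg - read-modify-write a device register
 * @dev: device to update
 * @reg: register offset
 * @mask: bits to change
 * @data: new value for the masked bits
 *
 * Returns 0 on success or a negative error code.
 */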
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	return lan78xx_write_reg(dev, reg, buf);
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stats. ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}

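/**
 * lan78xx_stop_hw - clear an enable bit and wait for the disabled state
 * @dev: device to operate on
 * @reg: register holding both bits
 * @hw_enabled: enable bit to clear
 * @hw_disabled: status bit that signals the block has stopped
 *
 * Polls for up to HW_DISABLE_TIMEOUT, sleeping HW_DISABLE_DELAY_MS between
 * reads. Returns 0 when stopped, -ETIMEDOUT otherwise, or a negative USB
 * error from the register accesses.
 */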
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	return stopped ? 0 : -ETIMEDOUT;
}

static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}

static int lan78xx_start_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start tx path");

	/* Start the MAC transmitter */

	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
	if (ret < 0)
		return ret;

	/* Start the Tx FIFO */

	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop tx path");

	/* Stop the Tx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
	if (ret < 0)
		return ret;

	/* Stop the MAC transmitter */

	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}

static int lan78xx_start_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start rx path");

	/* Start the Rx FIFO */

	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
	if (ret < 0)
		return ret;

	/* Start the MAC receiver */

	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop rx path");

	/* Stop the MAC receiver */

	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
	if (ret < 0)
		return ret;

	/* Stop the Rx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}

/* Poll until the MII access completes, or time out after one second.
 * Must be called with mdiobus_mutex held.
 */
static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (ret < 0)
			return ret;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -ETIMEDOUT;
}

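/* Compose a MII_ACC command word: PHY address, register index, read/write
 * direction and the BUSY bit that starts the transaction.
 */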
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -ETIMEDOUT;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Not a USB-specific error; try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	int ret;
	u8 sig;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if (ret < 0)
		return ret;

	if (sig != EEPROM_INDICATOR)
		return -ENODATA;

	return lan78xx_read_raw_eeprom(dev, offset, length, data);
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Not a USB-specific error; try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Not a USB-specific error; try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Not a USB-specific error; try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait for it to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait for it to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return ret;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -ETIMEDOUT;
}

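/**
 * lan78xx_dataport_write - write a buffer into internal RAM via the dataport
 * @dev: device to write to
 * @ram_select: which RAM to select (DP_SEL_RSEL_* value)
 * @addr: start address inside the selected RAM
 * @length: number of 32-bit words to write
 * @buf: values to write
 *
 * Takes the dataport mutex and an autopm reference for the duration of the
 * transfer. Returns 0 on success or a negative error code.
 */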
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if (pdata && index > 0 && index < NUM_OF_MAF) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns the hash bit number for a given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i, ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
				     DP_SEL_VHF_VLAN_LEN,
				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
	if (ret < 0)
		goto multicast_write_done;

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
		if (ret < 0)
			goto multicast_write_done;
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

multicast_write_done:
	if (ret < 0)
		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
}

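/* Intended as the ndo_set_rx_mode callback. Rebuilds the perfect-filter and
 * multicast hash tables under the rfe_ctl spinlock, then defers the actual
 * register and dataport writes to the set_multicast work item, since USB
 * transfers may sleep while this callback can run in atomic context.
 */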
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds the device's own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);

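/**
 * lan78xx_mac_reset - reset the MAC and wait for completion
 * @dev: device to reset
 *
 * Holds mdiobus_mutex and waits for any MDIO transaction to finish first,
 * since resetting the MAC during MDIO activity can lock up the interface.
 * Then sets MAC_CR_RST_ and polls for up to a second until the bit
 * self-clears. Returns 0 on success, -ETIMEDOUT, or a register access error.
 */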
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses; otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}

/**
 * lan78xx_phy_int_ack - Acknowledge PHY interrupt
 * @dev: pointer to the LAN78xx device structure
 *
 * This function acknowledges the PHY interrupt by setting the
 * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
{
	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq_safe(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* An invalid EEPROM_INDICATOR at offset zero will cause the device
	 * to fail to load data from the EEPROM.
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
	else if (stringset == ETH_SS_TEST)
		net_selftest_get_strings(data);
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else if (sset == ETH_SS_TEST)
		return net_selftest_get_count();
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_get_eee(dev->phylink, edata);
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_eee(dev->phylink, edata);
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_get(dev->phylink, cmd);
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_set(dev->phylink, cmd);
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	phylink_ethtool_get_pauseparam(dev->phylink, pause);
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_pauseparam(dev->phylink, pause);
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	unsigned int data_count = 0;
	u32 *data = buf;
	int i, ret;

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
		if (ret < 0) {
			netdev_warn(dev->net,
				    "failed to read register 0x%08x\n",
				    lan78xx_regs[i]);
			goto clean_data;
		}

		data_count++;
	}

	return;

clean_data:
	memset(data, 0, data_count * sizeof(u32));
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.self_test	= net_selftest,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

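/**
 * lan78xx_init_mac_address - determine and program the MAC address
 * @dev: device to initialise
 *
 * Uses the address already in RX_ADDRL/RX_ADDRH if it is valid; otherwise
 * tries, in order, the platform/device-tree address, then EEPROM/OTP, and
 * finally falls back to a random address. The result is written back to
 * the MAC, to perfect-filter slot 0, and set on the net_device.
 */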
1917 static int lan78xx_init_mac_address(struct lan78xx_net *dev)
1918 {
1919 	u32 addr_lo, addr_hi;
1920 	u8 addr[6];
1921 	int ret;
1922 
1923 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1924 	if (ret < 0)
1925 		return ret;
1926 
1927 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1928 	if (ret < 0)
1929 		return ret;
1930 
1931 	addr[0] = addr_lo & 0xFF;
1932 	addr[1] = (addr_lo >> 8) & 0xFF;
1933 	addr[2] = (addr_lo >> 16) & 0xFF;
1934 	addr[3] = (addr_lo >> 24) & 0xFF;
1935 	addr[4] = addr_hi & 0xFF;
1936 	addr[5] = (addr_hi >> 8) & 0xFF;
1937 
1938 	if (!is_valid_ether_addr(addr)) {
1939 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1940 			/* valid address present in Device Tree */
1941 			netif_dbg(dev, ifup, dev->net,
1942 				  "MAC address read from Device Tree");
1943 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1944 						 ETH_ALEN, addr) == 0) ||
1945 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1946 					      ETH_ALEN, addr) == 0)) &&
1947 			   is_valid_ether_addr(addr)) {
1948 			/* eeprom values are valid so use them */
1949 			netif_dbg(dev, ifup, dev->net,
1950 				  "MAC address read from EEPROM");
1951 		} else {
1952 			/* generate random MAC */
1953 			eth_random_addr(addr);
1954 			netif_dbg(dev, ifup, dev->net,
1955 				  "MAC address set to random addr");
1956 		}
1957 
1958 		addr_lo = addr[0] | (addr[1] << 8) |
1959 			  (addr[2] << 16) | (addr[3] << 24);
1960 		addr_hi = addr[4] | (addr[5] << 8);
1961 
1962 		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1963 		if (ret < 0)
1964 			return ret;
1965 
1966 		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1967 		if (ret < 0)
1968 			return ret;
1969 	}
1970 
1971 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1972 	if (ret < 0)
1973 		return ret;
1974 
1975 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1976 	if (ret < 0)
1977 		return ret;
1978 
1979 	eth_hw_addr_set(dev->net, addr);
1980 
1981 	return 0;
1982 }
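
/* Worked example of the RX_ADDRL/RX_ADDRH byte packing above
 * (illustrative only): for the address 00:11:22:33:44:55,
 *
 *	addr_lo = 0x00 | (0x11 << 8) | (0x22 << 16) | (0x33 << 24) = 0x33221100
 *	addr_hi = 0x44 | (0x55 << 8)                               = 0x00005544
 *
 * i.e. the address is stored little-endian, with the first octet in the
 * least significant byte of RX_ADDRL.
 */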
1983 
1984 /* MDIO read and write wrappers for phylib */
1985 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1986 {
1987 	struct lan78xx_net *dev = bus->priv;
1988 	u32 val, addr;
1989 	int ret;
1990 
1991 	ret = usb_autopm_get_interface(dev->intf);
1992 	if (ret < 0)
1993 		return ret;
1994 
1995 	mutex_lock(&dev->mdiobus_mutex);
1996 
1997 	/* confirm MII not busy */
1998 	ret = lan78xx_mdiobus_wait_not_busy(dev);
1999 	if (ret < 0)
2000 		goto done;
2001 
2002 	/* set the address, index & direction (read from PHY) */
2003 	addr = mii_access(phy_id, idx, MII_READ);
2004 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2005 	if (ret < 0)
2006 		goto done;
2007 
2008 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2009 	if (ret < 0)
2010 		goto done;
2011 
2012 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2013 	if (ret < 0)
2014 		goto done;
2015 
2016 	ret = (int)(val & 0xFFFF);
2017 
2018 done:
2019 	mutex_unlock(&dev->mdiobus_mutex);
2020 	usb_autopm_put_interface(dev->intf);
2021 
2022 	return ret;
2023 }
2024 
2025 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2026 				 u16 regval)
2027 {
2028 	struct lan78xx_net *dev = bus->priv;
2029 	u32 val, addr;
2030 	int ret;
2031 
2032 	ret = usb_autopm_get_interface(dev->intf);
2033 	if (ret < 0)
2034 		return ret;
2035 
2036 	mutex_lock(&dev->mdiobus_mutex);
2037 
2038 	/* confirm MII not busy */
2039 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2040 	if (ret < 0)
2041 		goto done;
2042 
2043 	val = (u32)regval;
2044 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2045 	if (ret < 0)
2046 		goto done;
2047 
2048 	/* set the address, index & direction (write to PHY) */
2049 	addr = mii_access(phy_id, idx, MII_WRITE);
2050 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2051 	if (ret < 0)
2052 		goto done;
2053 
2054 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2055 	if (ret < 0)
2056 		goto done;
2057 
2058 done:
2059 	mutex_unlock(&dev->mdiobus_mutex);
2060 	usb_autopm_put_interface(dev->intf);
2061 	return ret;
2062 }
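
/* Both wrappers implement the standard phylib bus contract: bus->read
 * returns the 16-bit register value or a negative errno, and bus->write
 * returns 0 or a negative errno. A minimal usage sketch from a PHY
 * driver's point of view (hedged; register names as in linux/mii.h):
 *
 *	int bmsr = lan78xx_mdiobus_read(dev->mdiobus, phy_id, MII_BMSR);
 *	if (bmsr < 0)
 *		return bmsr;			   (USB or MII access failed)
 *	link_up = !!(bmsr & BMSR_LSTATUS);	   (16-bit value on success)
 */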
2063 
2064 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2065 {
2066 	struct device_node *node;
2067 	int ret;
2068 
2069 	dev->mdiobus = mdiobus_alloc();
2070 	if (!dev->mdiobus) {
2071 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2072 		return -ENOMEM;
2073 	}
2074 
2075 	dev->mdiobus->priv = (void *)dev;
2076 	dev->mdiobus->read = lan78xx_mdiobus_read;
2077 	dev->mdiobus->write = lan78xx_mdiobus_write;
2078 	dev->mdiobus->name = "lan78xx-mdiobus";
2079 	dev->mdiobus->parent = &dev->udev->dev;
2080 
2081 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2082 		 dev->udev->bus->busnum, dev->udev->devnum);
2083 
2084 	switch (dev->chipid) {
2085 	case ID_REV_CHIP_ID_7800_:
2086 	case ID_REV_CHIP_ID_7850_:
2087 		/* set to internal PHY id */
2088 		dev->mdiobus->phy_mask = ~(1 << 1);
2089 		break;
2090 	case ID_REV_CHIP_ID_7801_:
2091 		/* scan through PHYAD[2..0] */
2092 		dev->mdiobus->phy_mask = ~(0xFF);
2093 		break;
2094 	}
2095 
2096 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2097 	ret = of_mdiobus_register(dev->mdiobus, node);
2098 	of_node_put(node);
2099 	if (ret) {
2100 		netdev_err(dev->net, "can't register MDIO bus\n");
2101 		goto exit1;
2102 	}
2103 
2104 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2105 	return 0;
2106 exit1:
2107 	mdiobus_free(dev->mdiobus);
2108 	return ret;
2109 }
2110 
2111 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2112 {
2113 	mdiobus_unregister(dev->mdiobus);
2114 	mdiobus_free(dev->mdiobus);
2115 }
2116 
2117 static int irq_map(struct irq_domain *d, unsigned int irq,
2118 		   irq_hw_number_t hwirq)
2119 {
2120 	struct irq_domain_data *data = d->host_data;
2121 
2122 	irq_set_chip_data(irq, data);
2123 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2124 	irq_set_noprobe(irq);
2125 
2126 	return 0;
2127 }
2128 
2129 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2130 {
2131 	irq_set_chip_and_handler(irq, NULL, NULL);
2132 	irq_set_chip_data(irq, NULL);
2133 }
2134 
2135 static const struct irq_domain_ops chip_domain_ops = {
2136 	.map	= irq_map,
2137 	.unmap	= irq_unmap,
2138 };
2139 
2140 static void lan78xx_irq_mask(struct irq_data *irqd)
2141 {
2142 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2143 
2144 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2145 }
2146 
2147 static void lan78xx_irq_unmask(struct irq_data *irqd)
2148 {
2149 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2150 
2151 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2152 }
2153 
2154 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2155 {
2156 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2157 
2158 	mutex_lock(&data->irq_lock);
2159 }
2160 
2161 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2162 {
2163 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2164 	struct lan78xx_net *dev =
2165 			container_of(data, struct lan78xx_net, domain_data);
2166 	u32 buf;
2167 	int ret;
2168 
2169 	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
2170 	 * are the only two callbacks executed in a non-atomic context.
2171 	 */
2172 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2173 	if (ret < 0)
2174 		goto irq_bus_sync_unlock;
2175 
2176 	if (buf != data->irqenable)
2177 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2178 
2179 irq_bus_sync_unlock:
2180 	if (ret < 0)
2181 		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
2182 			   ERR_PTR(ret));
2183 
2184 	mutex_unlock(&data->irq_lock);
2185 }
2186 
2187 static struct irq_chip lan78xx_irqchip = {
2188 	.name			= "lan78xx-irqs",
2189 	.irq_mask		= lan78xx_irq_mask,
2190 	.irq_unmask		= lan78xx_irq_unmask,
2191 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2192 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2193 };
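
/* Usage pattern for this slow-bus irqchip (a sketch of what the irq
 * core does around it, not driver-visible code):
 *
 *	irq_bus_lock()          -> mutex_lock(&data->irq_lock)
 *	irq_mask()/irq_unmask() -> only update the cached data->irqenable
 *	irq_bus_sync_unlock()   -> write INT_EP_CTL over USB, then unlock
 *
 * The sleeping USB register access is thereby confined to the two
 * callbacks that run in a non-atomic context.
 */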
2194 
2195 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2196 {
2197 	struct irq_domain *irqdomain;
2198 	unsigned int irqmap = 0;
2199 	u32 buf;
2200 	int ret = 0;
2201 
2202 	mutex_init(&dev->domain_data.irq_lock);
2203 
2204 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2205 	if (ret < 0)
2206 		return ret;
2207 
2208 	dev->domain_data.irqenable = buf;
2209 
2210 	dev->domain_data.irqchip = &lan78xx_irqchip;
2211 	dev->domain_data.irq_handler = handle_simple_irq;
2212 
2213 	irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
2214 					     &chip_domain_ops, &dev->domain_data);
2215 	if (irqdomain) {
2216 		/* create mapping for PHY interrupt */
2217 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2218 		if (!irqmap) {
2219 			irq_domain_remove(irqdomain);
2220 
2221 			irqdomain = NULL;
2222 			ret = -EINVAL;
2223 		}
2224 	} else {
2225 		ret = -EINVAL;
2226 	}
2227 
2228 	dev->domain_data.irqdomain = irqdomain;
2229 	dev->domain_data.phyirq = irqmap;
2230 
2231 	return ret;
2232 }
2233 
2234 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2235 {
2236 	if (dev->domain_data.phyirq > 0) {
2237 		irq_dispose_mapping(dev->domain_data.phyirq);
2238 
2239 		if (dev->domain_data.irqdomain)
2240 			irq_domain_remove(dev->domain_data.irqdomain);
2241 	}
2242 	dev->domain_data.phyirq = 0;
2243 	dev->domain_data.irqdomain = NULL;
2244 }
2245 
2246 static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode,
2247 			       const struct phylink_link_state *state)
2248 {
2249 	struct net_device *net = to_net_dev(config->dev);
2250 	struct lan78xx_net *dev = netdev_priv(net);
2251 	u32 mac_cr = 0;
2252 	int ret;
2253 
2254 	/* Check if the mode is supported */
2255 	if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) {
2256 		netdev_err(net, "Unsupported negotiation mode: %u\n", mode);
2257 		return;
2258 	}
2259 
2260 	switch (state->interface) {
2261 	case PHY_INTERFACE_MODE_GMII:
2262 		mac_cr |= MAC_CR_GMII_EN_;
2263 		break;
2264 	case PHY_INTERFACE_MODE_RGMII:
2265 	case PHY_INTERFACE_MODE_RGMII_ID:
2266 	case PHY_INTERFACE_MODE_RGMII_TXID:
2267 	case PHY_INTERFACE_MODE_RGMII_RXID:
2268 		break;
2269 	default:
2270 		netdev_warn(net, "Unsupported interface mode: %d\n",
2271 			    state->interface);
2272 		return;
2273 	}
2274 
2275 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr);
2276 	if (ret < 0)
2277 		netdev_err(net, "Failed to config MAC with error %pe\n",
2278 			   ERR_PTR(ret));
2279 }
2280 
2281 static void lan78xx_mac_link_down(struct phylink_config *config,
2282 				  unsigned int mode, phy_interface_t interface)
2283 {
2284 	struct net_device *net = to_net_dev(config->dev);
2285 	struct lan78xx_net *dev = netdev_priv(net);
2286 	int ret;
2287 
2288 	netif_stop_queue(net);
2289 
2290 	/* MAC reset will not de-assert TXEN/RXEN, we need to stop them
2291 	 * manually before reset. TX and RX should be disabled before running
2292 	 * link_up sequence.
2293 	 */
2294 	ret = lan78xx_stop_tx_path(dev);
2295 	if (ret < 0)
2296 		goto link_down_fail;
2297 
2298 	ret = lan78xx_stop_rx_path(dev);
2299 	if (ret < 0)
2300 		goto link_down_fail;
2301 
2302 	/* A MAC reset does not appear to affect the MAC configuration. It may
2303 	 * not be strictly required, but previous driver versions performed it
2304 	 * here, so keep it.
2305 	 */
2306 	ret = lan78xx_mac_reset(dev);
2307 	if (ret < 0)
2308 		goto link_down_fail;
2309 
2310 	return;
2311 
2312 link_down_fail:
2313 	netdev_err(dev->net, "Failed to set MAC down with error %pe\n",
2314 		   ERR_PTR(ret));
2315 }
2316 
2317 /**
2318  * lan78xx_configure_usb - Configure USB link power settings
2319  * @dev: pointer to the LAN78xx device structure
2320  * @speed: negotiated Ethernet link speed (in Mbps)
2321  *
2322  * This function configures U1/U2 link power management for SuperSpeed
2323  * USB devices based on the current Ethernet link speed. It uses the
2324  * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2325  *
2326  * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2327  *       LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2328  *
2329  * Return: 0 on success or a negative error code on failure.
2330  */
2331 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2332 {
2333 	u32 mask, val;
2334 	int ret;
2335 
2336 	/* Only configure USB settings for SuperSpeed devices */
2337 	if (dev->udev->speed != USB_SPEED_SUPER)
2338 		return 0;
2339 
2340 	/* LAN7850 does not support USB 3.x */
2341 	if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2342 		netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2343 		return 0;
2344 	}
2345 
2346 	switch (speed) {
2347 	case SPEED_1000:
2348 		/* Disable U2, enable U1 */
2349 		ret = lan78xx_update_reg(dev, USB_CFG1,
2350 					 USB_CFG1_DEV_U2_INIT_EN_, 0);
2351 		if (ret < 0)
2352 			return ret;
2353 
2354 		return lan78xx_update_reg(dev, USB_CFG1,
2355 					  USB_CFG1_DEV_U1_INIT_EN_,
2356 					  USB_CFG1_DEV_U1_INIT_EN_);
2357 
2358 	case SPEED_100:
2359 	case SPEED_10:
2360 		/* Enable both U1 and U2 */
2361 		mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2362 		val = mask;
2363 		return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2364 
2365 	default:
2366 		netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2367 		return -EINVAL;
2368 	}
2369 }
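
/* Resulting USB_CFG1 U1/U2 configuration per negotiated link speed, as
 * programmed above:
 *
 *	SPEED_1000:           U1 enabled, U2 disabled
 *	SPEED_100 / SPEED_10: U1 enabled, U2 enabled
 */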
2370 
2371 /**
2372  * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2373  * @dev: pointer to the LAN78xx device structure
2374  * @tx_pause: enable transmission of pause frames
2375  * @rx_pause: enable reception of pause frames
2376  *
2377  * This function configures the LAN78xx flow control settings by writing
2378  * to the FLOW and FCT_FLOW registers. The pause time is set to the
2379  * maximum allowed value (65535 quanta). FIFO thresholds are selected
2380  * based on USB speed.
2381  *
2382  * The Pause Time field is measured in units of 512-bit times (quanta):
2383  *   - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2384  *   - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2385  *   - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2386  *
2387  * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2388  *   - RXUSED is the number of bytes used in the RX FIFO
2389  *   - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2390  *   - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2391  *   - Both thresholds are encoded in units of 512 bytes (rounded up)
2392  *
2393  * Thresholds differ by USB speed because available USB bandwidth
2394  * affects how fast packets can be drained from the RX FIFO:
2395  *   - USB 3.x (SuperSpeed):
2396  *       FLOW_ON  = 9216 bytes → 18 units
2397  *       FLOW_OFF = 4096 bytes →  8 units
2398  *   - USB 2.0 (High-Speed):
2399  *       FLOW_ON  = 8704 bytes → 17 units
2400  *       FLOW_OFF = 1024 bytes →  2 units
2401  *
2402  * Note: The FCT_FLOW register must be configured before enabling TX pause
2403  *       (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2404  *
2405  * Return: 0 on success or a negative error code on failure.
2406  */
2407 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2408 					 bool tx_pause, bool rx_pause)
2409 {
2410 	/* Use maximum pause time: 65535 quanta (512-bit times) */
2411 	const u32 pause_time_quanta = 65535;
2412 	u32 fct_flow = 0;
2413 	u32 flow = 0;
2414 	int ret;
2415 
2416 	/* Prepare MAC flow control bits */
2417 	if (tx_pause)
2418 		flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2419 
2420 	if (rx_pause)
2421 		flow |= FLOW_CR_RX_FCEN_;
2422 
2423 	/* Select RX FIFO thresholds based on USB speed
2424 	 *
2425 	 * FCT_FLOW layout:
2426 	 *   bits [6:0]   FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2427 	 *   bits [14:8]  FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2428 	 *   thresholds are expressed in units of 512 bytes
2429 	 */
2430 	switch (dev->udev->speed) {
2431 	case USB_SPEED_SUPER:
2432 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2433 		break;
2434 	case USB_SPEED_HIGH:
2435 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2436 		break;
2437 	default:
2438 		netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2439 			    dev->udev->speed);
2440 		return -EINVAL;
2441 	}
2442 
2443 	/* Step 1: Write FIFO thresholds before enabling pause frames */
2444 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2445 	if (ret < 0)
2446 		return ret;
2447 
2448 	/* Step 2: Enable MAC pause functionality */
2449 	return lan78xx_write_reg(dev, FLOW, flow);
2450 }
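
/* Worked example of the FCT_FLOW encoding used above, taking the
 * SuperSpeed thresholds (FLOW_ON_SS = 9216, FLOW_OFF_SS = 4096):
 *
 *	FLOW_THRESHOLD(9216) = ((9216 + 511) / 512) & 0x7F = 18 (0x12)
 *	FLOW_THRESHOLD(4096) = ((4096 + 511) / 512) & 0x7F =  8 (0x08)
 *	FLOW_CTRL_THRESHOLD(9216, 4096) = 0x12 | (0x08 << 8) = 0x0812
 *
 * Pause frames are asserted once RXUSED reaches 18 * 512 = 9216 bytes
 * and deasserted when it falls back to 8 * 512 = 4096 bytes.
 */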
2451 
2452 static void lan78xx_mac_link_up(struct phylink_config *config,
2453 				struct phy_device *phy,
2454 				unsigned int mode, phy_interface_t interface,
2455 				int speed, int duplex,
2456 				bool tx_pause, bool rx_pause)
2457 {
2458 	struct net_device *net = to_net_dev(config->dev);
2459 	struct lan78xx_net *dev = netdev_priv(net);
2460 	u32 mac_cr = 0;
2461 	int ret;
2462 
2463 	switch (speed) {
2464 	case SPEED_1000:
2465 		mac_cr |= MAC_CR_SPEED_1000_;
2466 		break;
2467 	case SPEED_100:
2468 		mac_cr |= MAC_CR_SPEED_100_;
2469 		break;
2470 	case SPEED_10:
2471 		mac_cr |= MAC_CR_SPEED_10_;
2472 		break;
2473 	default:
2474 		netdev_err(dev->net, "Unsupported speed %d\n", speed);
2475 		return;
2476 	}
2477 
2478 	if (duplex == DUPLEX_FULL)
2479 		mac_cr |= MAC_CR_FULL_DUPLEX_;
2480 
2481 	/* make sure TXEN and RXEN are disabled before reconfiguring MAC */
2482 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ |
2483 				 MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr);
2484 	if (ret < 0)
2485 		goto link_up_fail;
2486 
2487 	ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause);
2488 	if (ret < 0)
2489 		goto link_up_fail;
2490 
2491 	ret = lan78xx_configure_usb(dev, speed);
2492 	if (ret < 0)
2493 		goto link_up_fail;
2494 
2495 	lan78xx_rx_urb_submit_all(dev);
2496 
2497 	ret = lan78xx_flush_rx_fifo(dev);
2498 	if (ret < 0)
2499 		goto link_up_fail;
2500 
2501 	ret = lan78xx_flush_tx_fifo(dev);
2502 	if (ret < 0)
2503 		goto link_up_fail;
2504 
2505 	ret = lan78xx_start_tx_path(dev);
2506 	if (ret < 0)
2507 		goto link_up_fail;
2508 
2509 	ret = lan78xx_start_rx_path(dev);
2510 	if (ret < 0)
2511 		goto link_up_fail;
2512 
2513 	netif_start_queue(net);
2514 
2515 	return;
2516 
2517 link_up_fail:
2518 	netdev_err(dev->net, "Failed to set MAC up with error %pe\n",
2519 		   ERR_PTR(ret));
2520 }
2521 
2522 /**
2523  * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support
2524  * @dev: LAN78xx device
2525  * @enable: true to enable EEE, false to disable
2526  *
2527  * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy
2528  * Efficient Ethernet (EEE) operation. According to current understanding
2529  * of the LAN7800 documentation, this bit can be modified while TX and RX
2530  * are enabled. No explicit requirement was found to disable data paths
2531  * before changing this bit.
2532  *
2533  * Return: 0 on success or a negative error code
2534  */
2535 static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable)
2536 {
2537 	u32 mac_cr = 0;
2538 
2539 	if (enable)
2540 		mac_cr |= MAC_CR_EEE_EN_;
2541 
2542 	return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr);
2543 }
2544 
2545 static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config)
2546 {
2547 	struct net_device *net = to_net_dev(config->dev);
2548 	struct lan78xx_net *dev = netdev_priv(net);
2549 
2550 	lan78xx_mac_eee_enable(dev, false);
2551 }
2552 
2553 static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
2554 				     bool tx_clk_stop)
2555 {
2556 	struct net_device *net = to_net_dev(config->dev);
2557 	struct lan78xx_net *dev = netdev_priv(net);
2558 	int ret;
2559 
2560 	/* Software should only change this field when Energy Efficient
2561 	 * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
2562 	 * EEEEN during probe, and phylink itself guarantees that
2563 	 * mac_disable_tx_lpi() will have been previously called.
2564 	 */
2565 	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer);
2566 	if (ret < 0)
2567 		return ret;
2568 
2569 	return lan78xx_mac_eee_enable(dev, true);
2570 }
2571 
2572 static const struct phylink_mac_ops lan78xx_phylink_mac_ops = {
2573 	.mac_config = lan78xx_mac_config,
2574 	.mac_link_down = lan78xx_mac_link_down,
2575 	.mac_link_up = lan78xx_mac_link_up,
2576 	.mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi,
2577 	.mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi,
2578 };
2579 
2580 /**
2581  * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801
2582  * @dev: LAN78xx device
2583  *
2584  * Use fixed link configuration with 1 Gbps full duplex. This is used in special
2585  * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface
2586  * to a switch without a visible PHY.
2587  *
2588  * Return: 0 on success or a negative error code.
2589  */
2590 static int lan78xx_set_fixed_link(struct lan78xx_net *dev)
2591 {
2592 	static const struct phylink_link_state state = {
2593 		.speed = SPEED_1000,
2594 		.duplex = DUPLEX_FULL,
2595 	};
2596 
2597 	netdev_info(dev->net,
2598 		    "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n");
2599 
2600 	return phylink_set_fixed_link(dev->phylink, &state);
2601 }
2602 
2603 /**
2604  * lan78xx_get_phy() - Probe or register PHY device and set interface mode
2605  * @dev: LAN78xx device structure
2606  *
2607  * This function attempts to find a PHY on the MDIO bus. If no PHY is found
2608  * and the chip is LAN7801, it registers a fixed PHY as fallback. It also
2609  * sets dev->interface based on chip ID and detected PHY type.
2610  *
2611  * Return: a valid PHY device pointer, or ERR_PTR() on failure.
2612  */
2613 static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
2614 {
2615 	struct phy_device *phydev;
2616 
2617 	/* Attempt to locate a PHY on the MDIO bus */
2618 	phydev = phy_find_first(dev->mdiobus);
2619 
2620 	switch (dev->chipid) {
2621 	case ID_REV_CHIP_ID_7801_:
2622 		if (phydev) {
2623 			/* External RGMII PHY detected */
2624 			dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2625 			phydev->is_internal = false;
2626 
2627 			if (!phydev->drv)
2628 				netdev_warn(dev->net,
2629 					    "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
2630 
2631 			return phydev;
2632 		}
2633 
2634 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2635 		/* No PHY found - fall back to a fixed link (e.g. KSZ switch board) */
2636 		return NULL;
2637 
2638 	case ID_REV_CHIP_ID_7800_:
2639 	case ID_REV_CHIP_ID_7850_:
2640 		if (!phydev)
2641 			return ERR_PTR(-ENODEV);
2642 
2643 		/* These use internal GMII-connected PHY */
2644 		dev->interface = PHY_INTERFACE_MODE_GMII;
2645 		phydev->is_internal = true;
2646 		return phydev;
2647 
2648 	default:
2649 		netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
2650 		return ERR_PTR(-ENODEV);
2651 	}
2652 }
2653 
2654 /**
2655  * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
2656  * @dev: LAN78xx device
2657  *
2658  * Configure MAC-side registers according to dev->interface, which should be
2659  * set by lan78xx_get_phy().
2660  *
2661  * - For PHY_INTERFACE_MODE_RGMII:
2662  *   Enable MAC-side TXC delay. This mode seems to be used in a special setup
2663  *   without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
2664  *   connected to the KSZ9897 switch, and the link timing is expected to be
2665  *   hardwired (e.g. via strapping or board layout). No devicetree support is
2666  *   assumed here.
2667  *
2668  * - For PHY_INTERFACE_MODE_RGMII_ID:
2669  *   Disable MAC-side delay and rely on the PHY driver to provide delay.
2670  *
2671  * - For GMII, no MAC-specific config is needed.
2672  *
2673  * Return: 0 on success or a negative error code.
2674  */
2675 static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
2676 {
2677 	int ret;
2678 
2679 	switch (dev->interface) {
2680 	case PHY_INTERFACE_MODE_RGMII:
2681 		/* Enable MAC-side TX clock delay */
2682 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2683 					MAC_RGMII_ID_TXC_DELAY_EN_);
2684 		if (ret < 0)
2685 			return ret;
2686 
2687 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2688 		if (ret < 0)
2689 			return ret;
2690 
2691 		ret = lan78xx_update_reg(dev, HW_CFG,
2692 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
2693 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
2694 		if (ret < 0)
2695 			return ret;
2696 
2697 		break;
2698 
2699 	case PHY_INTERFACE_MODE_RGMII_ID:
2700 		/* Disable MAC-side TXC delay, PHY provides it */
2701 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2702 		if (ret < 0)
2703 			return ret;
2704 
2705 		break;
2706 
2707 	case PHY_INTERFACE_MODE_GMII:
2708 		/* No MAC-specific configuration required */
2709 		break;
2710 
2711 	default:
2712 		netdev_warn(dev->net, "Unsupported interface mode: %d\n",
2713 			    dev->interface);
2714 		break;
2715 	}
2716 
2717 	return 0;
2718 }
2719 
2720 /**
2721  * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2722  * @dev: LAN78xx device
2723  * @phydev: PHY device (must be valid)
2724  *
2725  * Reads "microchip,led-modes" property from the PHY's DT node and enables
2726  * the corresponding number of LEDs by writing to HW_CFG.
2727  *
2728  * This helper preserves the original logic, enabling up to 4 LEDs.
2729  * If the property is not present, this function does nothing.
2730  *
2731  * Return: 0 on success or a negative error code.
2732  */
2733 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2734 					  struct phy_device *phydev)
2735 {
2736 	struct device_node *np = phydev->mdio.dev.of_node;
2737 	u32 reg;
2738 	int len, ret;
2739 
2740 	if (!np)
2741 		return 0;
2742 
2743 	len = of_property_count_elems_of_size(np, "microchip,led-modes",
2744 					      sizeof(u32));
2745 	if (len < 0)
2746 		return 0;
2747 
2748 	ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2749 	if (ret < 0)
2750 		return ret;
2751 
2752 	reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2753 		 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2754 
2755 	reg |= (len > 0) * HW_CFG_LED0_EN_ |
2756 	       (len > 1) * HW_CFG_LED1_EN_ |
2757 	       (len > 2) * HW_CFG_LED2_EN_ |
2758 	       (len > 3) * HW_CFG_LED3_EN_;
2759 
2760 	return lan78xx_write_reg(dev, HW_CFG, reg);
2761 }
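
/* Example of the branchless LED-enable expression above: with a DT
 * fragment such as (property values illustrative)
 *
 *	ethernet-phy {
 *		microchip,led-modes = <1 6>;
 *	};
 *
 * len is 2, so (len > 0) and (len > 1) evaluate to 1 and the expression
 * reduces to HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_, leaving LED2 and LED3
 * disabled.
 */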
2762 
2763 static int lan78xx_phylink_setup(struct lan78xx_net *dev)
2764 {
2765 	struct phylink_config *pc = &dev->phylink_config;
2766 	struct phylink *phylink;
2767 
2768 	pc->dev = &dev->net->dev;
2769 	pc->type = PHYLINK_NETDEV;
2770 	pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 |
2771 			       MAC_100 | MAC_1000FD;
2772 	pc->mac_managed_pm = true;
2773 	pc->lpi_capabilities = MAC_100FD | MAC_1000FD;
2774 	/* Default TX LPI (Low Power Idle) request delay count is set to 50us.
2776 	 *
2777 	 * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204.
2778 	 *
2779 	 * Reasoning:
2780 	 * According to the application note in the LAN7800 documentation, a
2781 	 * zero delay may negatively impact the TX data path’s ability to
2782 	 * support Gigabit operation. A value of 50us is recommended as a
2783 	 * reasonable default when the part operates at Gigabit speeds,
2784 	 * balancing stability and power efficiency in EEE mode. This delay can
2785 	 * be increased based on performance testing, as EEE is designed for
2786 	 * scenarios with mostly idle links and occasional bursts of full
2787 	 * bandwidth transmission. The goal is to ensure reliable Gigabit
2788 	 * performance without overly aggressive power optimization during
2789 	 * inactive periods.
2790 	 */
2791 	pc->lpi_timer_default = 50;
2792 	pc->eee_enabled_default = true;
2793 
2794 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2795 		phy_interface_set_rgmii(pc->supported_interfaces);
2796 	else
2797 		__set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces);
2798 
2799 	memcpy(dev->phylink_config.lpi_interfaces,
2800 	       dev->phylink_config.supported_interfaces,
2801 	       sizeof(dev->phylink_config.lpi_interfaces));
2802 
2803 	phylink = phylink_create(pc, dev->net->dev.fwnode,
2804 				 dev->interface, &lan78xx_phylink_mac_ops);
2805 	if (IS_ERR(phylink))
2806 		return PTR_ERR(phylink);
2807 
2808 	dev->phylink = phylink;
2809 
2810 	return 0;
2811 }
2812 
2813 static void lan78xx_phy_uninit(struct lan78xx_net *dev)
2814 {
2815 	if (dev->phylink) {
2816 		phylink_disconnect_phy(dev->phylink);
2817 		phylink_destroy(dev->phylink);
2818 		dev->phylink = NULL;
2819 	}
2820 }
2821 
2822 static int lan78xx_phy_init(struct lan78xx_net *dev)
2823 {
2824 	struct phy_device *phydev;
2825 	int ret;
2826 
2827 	phydev = lan78xx_get_phy(dev);
2828 	/* phydev can be NULL if no PHY is found and the chip is LAN7801,
2829 	 * which will use a fixed link later.
2830 	 * If an  error occurs, return the error code immediately.
2831 	 */
2832 	if (IS_ERR(phydev))
2833 		return PTR_ERR(phydev);
2834 
2835 	ret = lan78xx_phylink_setup(dev);
2836 	if (ret < 0)
2837 		return ret;
2838 
2839 	ret = lan78xx_mac_prepare_for_phy(dev);
2840 	if (ret < 0)
2841 		goto phylink_uninit;
2842 
2843 	/* If no PHY is found, set up a fixed link. This is specific to
2844 	 * the LAN7801 and is used in special cases like EVB-KSZ9897-1, where
2845 	 * the LAN7801 acts as a USB-to-Ethernet interface to a switch
2846 	 * without a visible PHY.
2847 	 */
2848 	if (!phydev) {
2849 		ret = lan78xx_set_fixed_link(dev);
2850 		if (ret < 0)
2851 			goto phylink_uninit;
2852 
2853 		/* No PHY found, so set up a fixed link and return early.
2854 		 * No need to configure PHY IRQ or attach to phylink.
2855 		 */
2856 		return 0;
2857 	}
2858 
2859 	/* if phyirq is not set, use polling mode in phylib */
2860 	if (dev->domain_data.phyirq > 0)
2861 		phydev->irq = dev->domain_data.phyirq;
2862 	else
2863 		phydev->irq = PHY_POLL;
2864 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2865 
2866 	ret = phylink_connect_phy(dev->phylink, phydev);
2867 	if (ret) {
2868 		netdev_err(dev->net, "can't attach PHY to %s, error %pe\n",
2869 			   dev->mdiobus->id, ERR_PTR(ret));
2870 		goto phylink_uninit;
2871 	}
2872 
2873 	ret = lan78xx_configure_leds_from_dt(dev, phydev);
2874 	if (ret < 0)
2875 		goto phylink_uninit;
2876 
2877 	return 0;
2878 
2879 phylink_uninit:
2880 	lan78xx_phy_uninit(dev);
2881 
2882 	return ret;
2883 }
2884 
2885 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2886 {
2887 	bool rxenabled;
2888 	u32 buf;
2889 	int ret;
2890 
2891 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2892 	if (ret < 0)
2893 		return ret;
2894 
2895 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2896 
2897 	if (rxenabled) {
2898 		buf &= ~MAC_RX_RXEN_;
2899 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2900 		if (ret < 0)
2901 			return ret;
2902 	}
2903 
2904 	/* add 4 to size for FCS */
2905 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2906 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2907 
2908 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2909 	if (ret < 0)
2910 		return ret;
2911 
2912 	if (rxenabled) {
2913 		buf |= MAC_RX_RXEN_;
2914 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2915 		if (ret < 0)
2916 			return ret;
2917 	}
2918 
2919 	return 0;
2920 }
2921 
2922 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2923 {
2924 	struct sk_buff *skb;
2925 	unsigned long flags;
2926 	int count = 0;
2927 
2928 	spin_lock_irqsave(&q->lock, flags);
2929 	while (!skb_queue_empty(q)) {
2930 		struct skb_data	*entry;
2931 		struct urb *urb;
2932 		int ret;
2933 
2934 		skb_queue_walk(q, skb) {
2935 			entry = (struct skb_data *)skb->cb;
2936 			if (entry->state != unlink_start)
2937 				goto found;
2938 		}
2939 		break;
2940 found:
2941 		entry->state = unlink_start;
2942 		urb = entry->urb;
2943 
2944 		/* Take a reference on the URB so it cannot be freed
2945 		 * while usb_unlink_urb runs; otherwise this could
2946 		 * trigger a use-after-free inside usb_unlink_urb,
2947 		 * since usb_unlink_urb always races with the
2948 		 * .complete handler (including defer_bh).
2949 		 */
2950 		usb_get_urb(urb);
2951 		spin_unlock_irqrestore(&q->lock, flags);
2952 		/* during some PM-driven resume scenarios,
2953 		 * these (async) unlinks complete immediately
2954 		 */
2955 		ret = usb_unlink_urb(urb);
2956 		if (ret != -EINPROGRESS && ret != 0)
2957 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2958 		else
2959 			count++;
2960 		usb_put_urb(urb);
2961 		spin_lock_irqsave(&q->lock, flags);
2962 	}
2963 	spin_unlock_irqrestore(&q->lock, flags);
2964 	return count;
2965 }
2966 
2967 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2968 {
2969 	struct lan78xx_net *dev = netdev_priv(netdev);
2970 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2971 	int ret;
2972 
2973 	/* no second zero-length packet read wanted after mtu-sized packets */
2974 	if ((max_frame_len % dev->maxpacket) == 0)
2975 		return -EDOM;
2976 
2977 	ret = usb_autopm_get_interface(dev->intf);
2978 	if (ret < 0)
2979 		return ret;
2980 
2981 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2982 	if (ret < 0)
2983 		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2984 			   new_mtu, netdev->mtu, ERR_PTR(ret));
2985 	else
2986 		WRITE_ONCE(netdev->mtu, new_mtu);
2987 
2988 	usb_autopm_put_interface(dev->intf);
2989 
2990 	return ret;
2991 }
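
/* Example of the -EDOM check above (illustrative): on a high-speed link
 * the bulk endpoint size is 512 bytes, so a max_frame_len that is an
 * exact multiple of dev->maxpacket would force the device to terminate
 * such transfers with an extra zero-length packet; MTUs that would
 * require this are rejected up front.
 */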
2992 
2993 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2994 {
2995 	struct lan78xx_net *dev = netdev_priv(netdev);
2996 	struct sockaddr *addr = p;
2997 	u32 addr_lo, addr_hi;
2998 	int ret;
2999 
3000 	if (netif_running(netdev))
3001 		return -EBUSY;
3002 
3003 	if (!is_valid_ether_addr(addr->sa_data))
3004 		return -EADDRNOTAVAIL;
3005 
3006 	eth_hw_addr_set(netdev, addr->sa_data);
3007 
3008 	addr_lo = netdev->dev_addr[0] |
3009 		  netdev->dev_addr[1] << 8 |
3010 		  netdev->dev_addr[2] << 16 |
3011 		  netdev->dev_addr[3] << 24;
3012 	addr_hi = netdev->dev_addr[4] |
3013 		  netdev->dev_addr[5] << 8;
3014 
3015 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
3016 	if (ret < 0)
3017 		return ret;
3018 
3019 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
3020 	if (ret < 0)
3021 		return ret;
3022 
3023 	/* Added to support MAC address changes */
3024 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
3025 	if (ret < 0)
3026 		return ret;
3027 
3028 	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
3029 }
3030 
3031 /* Enable or disable Rx checksum offload engine */
3032 static int lan78xx_set_features(struct net_device *netdev,
3033 				netdev_features_t features)
3034 {
3035 	struct lan78xx_net *dev = netdev_priv(netdev);
3036 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3037 	unsigned long flags;
3038 
3039 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3040 
3041 	if (features & NETIF_F_RXCSUM) {
3042 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3043 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3044 	} else {
3045 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3046 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3047 	}
3048 
3049 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
3050 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3051 	else
3052 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3053 
3054 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3055 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3056 	else
3057 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3058 
3059 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3060 
3061 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3062 }
3063 
3064 static void lan78xx_deferred_vlan_write(struct work_struct *param)
3065 {
3066 	struct lan78xx_priv *pdata =
3067 			container_of(param, struct lan78xx_priv, set_vlan);
3068 	struct lan78xx_net *dev = pdata->dev;
3069 
3070 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
3071 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
3072 }
3073 
3074 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
3075 				   __be16 proto, u16 vid)
3076 {
3077 	struct lan78xx_net *dev = netdev_priv(netdev);
3078 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3079 	u16 vid_bit_index;
3080 	u16 vid_dword_index;
3081 
3082 	vid_dword_index = (vid >> 5) & 0x7F;
3083 	vid_bit_index = vid & 0x1F;
3084 
3085 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
3086 
3087 	/* defer register writes to a sleepable context */
3088 	schedule_work(&pdata->set_vlan);
3089 
3090 	return 0;
3091 }
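
/* Worked example of the VLAN table indexing above: for vid = 100,
 *
 *	vid_dword_index = (100 >> 5) & 0x7F = 3
 *	vid_bit_index   = 100 & 0x1F        = 4
 *
 * so VID 100 is tracked by bit 4 of vlan_table[3]; 128 u32 words cover
 * the full 4096-entry VID space.
 */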
3092 
3093 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
3094 				    __be16 proto, u16 vid)
3095 {
3096 	struct lan78xx_net *dev = netdev_priv(netdev);
3097 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3098 	u16 vid_bit_index;
3099 	u16 vid_dword_index;
3100 
3101 	vid_dword_index = (vid >> 5) & 0x7F;
3102 	vid_bit_index = vid & 0x1F;
3103 
3104 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
3105 
3106 	/* defer register writes to a sleepable context */
3107 	schedule_work(&pdata->set_vlan);
3108 
3109 	return 0;
3110 }
3111 
3112 static int lan78xx_init_ltm(struct lan78xx_net *dev)
3113 {
3114 	u32 regs[6] = { 0 };
3115 	int ret;
3116 	u32 buf;
3117 
3118 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
3119 	if (ret < 0)
3120 		goto init_ltm_failed;
3121 
3122 	if (buf & USB_CFG1_LTM_ENABLE_) {
3123 		u8 temp[2];
3124 		/* Get values from EEPROM first */
3125 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
3126 			if (temp[0] == 24) {
3127 				ret = lan78xx_read_raw_eeprom(dev,
3128 							      temp[1] * 2,
3129 							      24,
3130 							      (u8 *)regs);
3131 				if (ret < 0)
3132 					return ret;
3133 			}
3134 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
3135 			if (temp[0] == 24) {
3136 				ret = lan78xx_read_raw_otp(dev,
3137 							   temp[1] * 2,
3138 							   24,
3139 							   (u8 *)regs);
3140 				if (ret < 0)
3141 					return ret;
3142 			}
3143 		}
3144 	}
3145 
3146 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
3147 	if (ret < 0)
3148 		goto init_ltm_failed;
3149 
3150 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
3151 	if (ret < 0)
3152 		goto init_ltm_failed;
3153 
3154 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
3155 	if (ret < 0)
3156 		goto init_ltm_failed;
3157 
3158 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
3159 	if (ret < 0)
3160 		goto init_ltm_failed;
3161 
3162 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
3163 	if (ret < 0)
3164 		goto init_ltm_failed;
3165 
3166 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
3167 	if (ret < 0)
3168 		goto init_ltm_failed;
3169 
3170 	return 0;
3171 
3172 init_ltm_failed:
3173 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
3174 	return ret;
3175 }
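
/* LTM override layout assumed by the code above (inferred from this
 * function, not quoted from a datasheet): EEPROM/OTP offset 0x3F holds
 * a two-byte descriptor,
 *
 *	temp[0] - payload length, must be 24 (six u32 register values)
 *	temp[1] - payload offset in 16-bit words (byte offset = temp[1] * 2)
 *
 * When present, the 24 bytes populate regs[0..5] and are written to
 * LTM_BELT_IDLE0/1, LTM_BELT_ACT0/1 and LTM_INACTIVE0/1; otherwise all
 * six registers are cleared to zero.
 */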
3176 
3177 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3178 {
3179 	int result = 0;
3180 
3181 	switch (dev->udev->speed) {
3182 	case USB_SPEED_SUPER:
3183 		dev->rx_urb_size = RX_SS_URB_SIZE;
3184 		dev->tx_urb_size = TX_SS_URB_SIZE;
3185 		dev->n_rx_urbs = RX_SS_URB_NUM;
3186 		dev->n_tx_urbs = TX_SS_URB_NUM;
3187 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
3188 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3189 		break;
3190 	case USB_SPEED_HIGH:
3191 		dev->rx_urb_size = RX_HS_URB_SIZE;
3192 		dev->tx_urb_size = TX_HS_URB_SIZE;
3193 		dev->n_rx_urbs = RX_HS_URB_NUM;
3194 		dev->n_tx_urbs = TX_HS_URB_NUM;
3195 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
3196 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3197 		break;
3198 	case USB_SPEED_FULL:
3199 		dev->rx_urb_size = RX_FS_URB_SIZE;
3200 		dev->tx_urb_size = TX_FS_URB_SIZE;
3201 		dev->n_rx_urbs = RX_FS_URB_NUM;
3202 		dev->n_tx_urbs = TX_FS_URB_NUM;
3203 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
3204 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3205 		break;
3206 	default:
3207 		netdev_warn(dev->net, "USB bus speed not supported\n");
3208 		result = -EIO;
3209 		break;
3210 	}
3211 
3212 	return result;
3213 }
3214 
3215 static int lan78xx_reset(struct lan78xx_net *dev)
3216 {
3217 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3218 	unsigned long timeout;
3219 	int ret;
3220 	u32 buf;
3221 
3222 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3223 	if (ret < 0)
3224 		return ret;
3225 
3226 	buf |= HW_CFG_LRST_;
3227 
3228 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3229 	if (ret < 0)
3230 		return ret;
3231 
3232 	timeout = jiffies + HZ;
3233 	do {
3234 		mdelay(1);
3235 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3236 		if (ret < 0)
3237 			return ret;
3238 
3239 		if (time_after(jiffies, timeout)) {
3240 			netdev_warn(dev->net,
3241 				    "timeout on completion of LiteReset");
3242 			ret = -ETIMEDOUT;
3243 			return ret;
3244 		}
3245 	} while (buf & HW_CFG_LRST_);
3246 
3247 	ret = lan78xx_init_mac_address(dev);
3248 	if (ret < 0)
3249 		return ret;
3250 
3251 	/* save DEVID for later usage */
3252 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
3253 	if (ret < 0)
3254 		return ret;
3255 
3256 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
3257 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
3258 
3259 	/* Respond to the IN token with a NAK */
3260 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3261 	if (ret < 0)
3262 		return ret;
3263 
3264 	buf |= USB_CFG_BIR_;
3265 
3266 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3267 	if (ret < 0)
3268 		return ret;
3269 
3270 	/* Init LTM */
3271 	ret = lan78xx_init_ltm(dev);
3272 	if (ret < 0)
3273 		return ret;
3274 
3275 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
3276 	if (ret < 0)
3277 		return ret;
3278 
3279 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
3280 	if (ret < 0)
3281 		return ret;
3282 
3283 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3284 	if (ret < 0)
3285 		return ret;
3286 
3287 	buf |= HW_CFG_MEF_;
3288 	buf |= HW_CFG_CLK125_EN_;
3289 	buf |= HW_CFG_REFCLK25_EN_;
3290 
3291 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3292 	if (ret < 0)
3293 		return ret;
3294 
3295 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3296 	if (ret < 0)
3297 		return ret;
3298 
3299 	buf |= USB_CFG_BCE_;
3300 
3301 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3302 	if (ret < 0)
3303 		return ret;
3304 
3305 	/* set FIFO sizes */
3306 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
3307 
3308 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
3309 	if (ret < 0)
3310 		return ret;
3311 
3312 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
3313 
3314 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
3315 	if (ret < 0)
3316 		return ret;
3317 
3318 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3319 	if (ret < 0)
3320 		return ret;
3321 
3322 	ret = lan78xx_write_reg(dev, FLOW, 0);
3323 	if (ret < 0)
3324 		return ret;
3325 
3326 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3327 	if (ret < 0)
3328 		return ret;
3329 
3330 	/* Don't need rfe_ctl_lock during initialisation */
3331 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3332 	if (ret < 0)
3333 		return ret;
3334 
3335 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3336 
3337 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3338 	if (ret < 0)
3339 		return ret;
3340 
3341 	/* Enable or disable checksum offload engines */
3342 	ret = lan78xx_set_features(dev->net, dev->net->features);
3343 	if (ret < 0)
3344 		return ret;
3345 
3346 	lan78xx_set_multicast(dev->net);
3347 
3348 	/* reset PHY */
3349 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3350 	if (ret < 0)
3351 		return ret;
3352 
3353 	buf |= PMT_CTL_PHY_RST_;
3354 
3355 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3356 	if (ret < 0)
3357 		return ret;
3358 
3359 	timeout = jiffies + HZ;
3360 	do {
3361 		mdelay(1);
3362 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3363 		if (ret < 0)
3364 			return ret;
3365 
3366 		if (time_after(jiffies, timeout)) {
3367 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3368 			ret = -ETIMEDOUT;
3369 			return ret;
3370 		}
3371 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3372 
3373 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3374 	if (ret < 0)
3375 		return ret;
3376 
3377 	buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_);
3378 
3379 	/* LAN7801 only has RGMII mode */
3380 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
3381 		buf &= ~MAC_CR_GMII_EN_;
3382 
3383 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3384 	if (ret < 0)
3385 		return ret;
3386 
3387 	ret = lan78xx_set_rx_max_frame_length(dev,
3388 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3389 
3390 	return ret;
3391 }
3392 
3393 static void lan78xx_init_stats(struct lan78xx_net *dev)
3394 {
3395 	u32 *p;
3396 	int i;
3397 
3398 	/* initialize rollover limits for the stats update:
3399 	 * some counters are 20 bits wide and some are 32 bits
3400 	 */
3401 	p = (u32 *)&dev->stats.rollover_max;
3402 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3403 		p[i] = 0xFFFFF;
3404 
3405 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3406 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3407 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3408 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3409 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3410 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3411 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3412 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3413 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3414 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3415 
3416 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3417 }
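
/* Sketch of how a rollover limit can be applied when accumulating a
 * hardware counter (hedged; the actual update logic lives elsewhere in
 * this file): a 20-bit counter wraps at 0xFFFFF, so a wrap-safe delta
 * can be computed as
 *
 *	delta = (new_val - old_val) & rollover_max;
 *
 * which is why the byte and LPI counters above get a full 32-bit limit
 * while the remaining counters default to 20 bits.
 */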
3418 
3419 static int lan78xx_open(struct net_device *net)
3420 {
3421 	struct lan78xx_net *dev = netdev_priv(net);
3422 	int ret;
3423 
3424 	netif_dbg(dev, ifup, dev->net, "open device");
3425 
3426 	ret = usb_autopm_get_interface(dev->intf);
3427 	if (ret < 0)
3428 		return ret;
3429 
3430 	mutex_lock(&dev->dev_mutex);
3431 
3432 	lan78xx_init_stats(dev);
3433 
3434 	napi_enable(&dev->napi);
3435 
3436 	set_bit(EVENT_DEV_OPEN, &dev->flags);
3437 
3438 	/* for Link Check */
3439 	if (dev->urb_intr) {
3440 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3441 		if (ret < 0) {
3442 			netif_err(dev, ifup, dev->net,
3443 				  "intr submit %d\n", ret);
3444 			goto done;
3445 		}
3446 	}
3447 
3448 	phylink_start(dev->phylink);
3449 
3450 done:
3451 	mutex_unlock(&dev->dev_mutex);
3452 
3453 	if (ret < 0)
3454 		usb_autopm_put_interface(dev->intf);
3455 
3456 	return ret;
3457 }
3458 
3459 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3460 {
3461 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3462 	DECLARE_WAITQUEUE(wait, current);
3463 	int temp;
3464 
3465 	/* ensure there are no more active urbs */
3466 	add_wait_queue(&unlink_wakeup, &wait);
3467 	set_current_state(TASK_UNINTERRUPTIBLE);
3468 	dev->wait = &unlink_wakeup;
3469 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3470 
3471 	/* maybe wait for deletions to finish. */
3472 	while (!skb_queue_empty(&dev->rxq) ||
3473 	       !skb_queue_empty(&dev->txq)) {
3474 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3475 		set_current_state(TASK_UNINTERRUPTIBLE);
3476 		netif_dbg(dev, ifdown, dev->net,
3477 			  "waited for %d urb completions", temp);
3478 	}
3479 	set_current_state(TASK_RUNNING);
3480 	dev->wait = NULL;
3481 	remove_wait_queue(&unlink_wakeup, &wait);
3482 
3483 	/* empty the Rx done, Rx overflow and Tx pend queues */
3485 	while (!skb_queue_empty(&dev->rxq_done)) {
3486 		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3487 
3488 		lan78xx_release_rx_buf(dev, skb);
3489 	}
3490 
3491 	skb_queue_purge(&dev->rxq_overflow);
3492 	skb_queue_purge(&dev->txq_pend);
3493 }
3494 
3495 static int lan78xx_stop(struct net_device *net)
3496 {
3497 	struct lan78xx_net *dev = netdev_priv(net);
3498 
3499 	netif_dbg(dev, ifup, dev->net, "stop device");
3500 
3501 	mutex_lock(&dev->dev_mutex);
3502 
3503 	if (timer_pending(&dev->stat_monitor))
3504 		timer_delete_sync(&dev->stat_monitor);
3505 
3506 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3507 	napi_disable(&dev->napi);
3508 
3509 	lan78xx_terminate_urbs(dev);
3510 
3511 	netif_info(dev, ifdown, dev->net,
3512 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3513 		   net->stats.rx_packets, net->stats.tx_packets,
3514 		   net->stats.rx_errors, net->stats.tx_errors);
3515 
3516 	phylink_stop(dev->phylink);
3517 
3518 	usb_kill_urb(dev->urb_intr);
3519 
3520 	/* deferred work (task, timer, softirq) must also stop.
3521 	 * can't flush_scheduled_work() until we drop rtnl (later),
3522 	 * else workers could deadlock; so make workers a NOP.
3523 	 */
3524 	clear_bit(EVENT_TX_HALT, &dev->flags);
3525 	clear_bit(EVENT_RX_HALT, &dev->flags);
3526 	clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
3527 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3528 
3529 	cancel_delayed_work_sync(&dev->wq);
3530 
3531 	usb_autopm_put_interface(dev->intf);
3532 
3533 	mutex_unlock(&dev->dev_mutex);
3534 
3535 	return 0;
3536 }
3537 
3538 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3539 			       struct sk_buff_head *list, enum skb_state state)
3540 {
3541 	unsigned long flags;
3542 	enum skb_state old_state;
3543 	struct skb_data *entry = (struct skb_data *)skb->cb;
3544 
3545 	spin_lock_irqsave(&list->lock, flags);
3546 	old_state = entry->state;
3547 	entry->state = state;
3548 
3549 	__skb_unlink(skb, list);
3550 	spin_unlock(&list->lock);
3551 	spin_lock(&dev->rxq_done.lock);
3552 
3553 	__skb_queue_tail(&dev->rxq_done, skb);
3554 	if (skb_queue_len(&dev->rxq_done) == 1)
3555 		napi_schedule(&dev->napi);
3556 
3557 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3558 
3559 	return old_state;
3560 }
3561 
3562 static void tx_complete(struct urb *urb)
3563 {
3564 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3565 	struct skb_data *entry = (struct skb_data *)skb->cb;
3566 	struct lan78xx_net *dev = entry->dev;
3567 
3568 	if (urb->status == 0) {
3569 		dev->net->stats.tx_packets += entry->num_of_packet;
3570 		dev->net->stats.tx_bytes += entry->length;
3571 	} else {
3572 		dev->net->stats.tx_errors += entry->num_of_packet;
3573 
3574 		switch (urb->status) {
3575 		case -EPIPE:
3576 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3577 			break;
3578 
3579 		/* software-driven interface shutdown */
3580 		case -ECONNRESET:
3581 		case -ESHUTDOWN:
3582 			netif_dbg(dev, tx_err, dev->net,
3583 				  "tx err interface gone %d\n",
3584 				  entry->urb->status);
3585 			break;
3586 
3587 		case -EPROTO:
3588 		case -ETIME:
3589 		case -EILSEQ:
3590 			netif_stop_queue(dev->net);
3591 			netif_dbg(dev, tx_err, dev->net,
3592 				  "tx err queue stopped %d\n",
3593 				  entry->urb->status);
3594 			break;
3595 		default:
3596 			netif_dbg(dev, tx_err, dev->net,
3597 				  "unknown tx err %d\n",
3598 				  entry->urb->status);
3599 			break;
3600 		}
3601 	}
3602 
3603 	usb_autopm_put_interface_async(dev->intf);
3604 
3605 	skb_unlink(skb, &dev->txq);
3606 
3607 	lan78xx_release_tx_buf(dev, skb);
3608 
3609 	/* Re-schedule NAPI if Tx data is pending but no URBs are in progress. */
3611 	if (skb_queue_empty(&dev->txq) &&
3612 	    !skb_queue_empty(&dev->txq_pend))
3613 		napi_schedule(&dev->napi);
3614 }
3615 
3616 static void lan78xx_queue_skb(struct sk_buff_head *list,
3617 			      struct sk_buff *newsk, enum skb_state state)
3618 {
3619 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3620 
3621 	__skb_queue_tail(list, newsk);
3622 	entry->state = state;
3623 }
3624 
3625 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3626 {
3627 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3628 }
3629 
3630 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3631 {
3632 	return dev->tx_pend_data_len;
3633 }
3634 
3635 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3636 				    struct sk_buff *skb,
3637 				    unsigned int *tx_pend_data_len)
3638 {
3639 	unsigned long flags;
3640 
3641 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3642 
3643 	__skb_queue_tail(&dev->txq_pend, skb);
3644 
3645 	dev->tx_pend_data_len += skb->len;
3646 	*tx_pend_data_len = dev->tx_pend_data_len;
3647 
3648 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3649 }
3650 
3651 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3652 					 struct sk_buff *skb,
3653 					 unsigned int *tx_pend_data_len)
3654 {
3655 	unsigned long flags;
3656 
3657 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3658 
3659 	__skb_queue_head(&dev->txq_pend, skb);
3660 
3661 	dev->tx_pend_data_len += skb->len;
3662 	*tx_pend_data_len = dev->tx_pend_data_len;
3663 
3664 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3665 }
3666 
3667 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3668 				    struct sk_buff **skb,
3669 				    unsigned int *tx_pend_data_len)
3670 {
3671 	unsigned long flags;
3672 
3673 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3674 
3675 	*skb = __skb_dequeue(&dev->txq_pend);
3676 	if (*skb)
3677 		dev->tx_pend_data_len -= (*skb)->len;
3678 	*tx_pend_data_len = dev->tx_pend_data_len;
3679 
3680 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3681 }
3682 
3683 static netdev_tx_t
3684 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3685 {
3686 	struct lan78xx_net *dev = netdev_priv(net);
3687 	unsigned int tx_pend_data_len;
3688 
3689 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3690 		schedule_delayed_work(&dev->wq, 0);
3691 
3692 	skb_tx_timestamp(skb);
3693 
3694 	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3695 
3696 	/* Set up a Tx URB if none is in progress */
3697 
3698 	if (skb_queue_empty(&dev->txq))
3699 		napi_schedule(&dev->napi);
3700 
3701 	/* Stop stack Tx queue if we have enough data to fill
3702 	 * all the free Tx URBs.
3703 	 */
3704 	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3705 		netif_stop_queue(net);
3706 
3707 		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3708 			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3709 
3710 		/* Kick off transmission of pending data */
3711 
3712 		if (!skb_queue_empty(&dev->txq_free))
3713 			napi_schedule(&dev->napi);
3714 	}
3715 
3716 	return NETDEV_TX_OK;
3717 }
3718 
3719 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3720 {
3721 	struct lan78xx_priv *pdata = NULL;
3722 	int ret;
3723 	int i;
3724 
3725 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3726 
3727 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3728 	if (!pdata) {
3729 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3730 		return -ENOMEM;
3731 	}
3732 
3733 	pdata->dev = dev;
3734 
3735 	spin_lock_init(&pdata->rfe_ctl_lock);
3736 	mutex_init(&pdata->dataport_mutex);
3737 
3738 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3739 
3740 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3741 		pdata->vlan_table[i] = 0;
3742 
3743 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3744 
3745 	dev->net->features = 0;
3746 
3747 	if (DEFAULT_TX_CSUM_ENABLE)
3748 		dev->net->features |= NETIF_F_HW_CSUM;
3749 
3750 	if (DEFAULT_RX_CSUM_ENABLE)
3751 		dev->net->features |= NETIF_F_RXCSUM;
3752 
3753 	if (DEFAULT_TSO_CSUM_ENABLE)
3754 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3755 
3756 	if (DEFAULT_VLAN_RX_OFFLOAD)
3757 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3758 
3759 	if (DEFAULT_VLAN_FILTER_ENABLE)
3760 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3761 
3762 	dev->net->hw_features = dev->net->features;
3763 
3764 	ret = lan78xx_setup_irq_domain(dev);
3765 	if (ret < 0) {
3766 		netdev_warn(dev->net,
3767 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3768 		goto out1;
3769 	}
3770 
3771 	/* Init all registers */
3772 	ret = lan78xx_reset(dev);
3773 	if (ret) {
3774 		netdev_warn(dev->net, "Registers INIT FAILED....");
3775 		goto out2;
3776 	}
3777 
3778 	ret = lan78xx_mdio_init(dev);
3779 	if (ret) {
3780 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3781 		goto out2;
3782 	}
3783 
3784 	dev->net->flags |= IFF_MULTICAST;
3785 
3786 	pdata->wol = WAKE_MAGIC;
3787 
3788 	return ret;
3789 
3790 out2:
3791 	lan78xx_remove_irq_domain(dev);
3792 
3793 out1:
3794 	netdev_warn(dev->net, "Bind routine FAILED");
3795 	cancel_work_sync(&pdata->set_multicast);
3796 	cancel_work_sync(&pdata->set_vlan);
3797 	kfree(pdata);
3798 	return ret;
3799 }
3800 
3801 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3802 {
3803 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3804 
3805 	lan78xx_remove_irq_domain(dev);
3806 
3807 	lan78xx_remove_mdio(dev);
3808 
3809 	if (pdata) {
3810 		cancel_work_sync(&pdata->set_multicast);
3811 		cancel_work_sync(&pdata->set_vlan);
3812 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3813 		kfree(pdata);
3814 		pdata = NULL;
3815 		dev->data[0] = 0;
3816 	}
3817 }
3818 
3819 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3820 				    struct sk_buff *skb,
3821 				    u32 rx_cmd_a, u32 rx_cmd_b)
3822 {
3823 	/* HW Checksum offload appears to be flawed if used when not stripping
3824 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3825 	 */
3826 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3827 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3828 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3829 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3830 		skb->ip_summed = CHECKSUM_NONE;
3831 	} else {
3832 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3833 		skb->ip_summed = CHECKSUM_COMPLETE;
3834 	}
3835 }
3836 
3837 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3838 				    struct sk_buff *skb,
3839 				    u32 rx_cmd_a, u32 rx_cmd_b)
3840 {
3841 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3842 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3843 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3844 				       (rx_cmd_b & 0xffff));
3845 }
3846 
3847 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3848 {
3849 	dev->net->stats.rx_packets++;
3850 	dev->net->stats.rx_bytes += skb->len;
3851 
3852 	skb->protocol = eth_type_trans(skb, dev->net);
3853 
3854 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3855 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3856 	memset(skb->cb, 0, sizeof(struct skb_data));
3857 
3858 	if (skb_defer_rx_timestamp(skb))
3859 		return;
3860 
3861 	napi_gro_receive(&dev->napi, skb);
3862 }
3863 
3864 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3865 		      int budget, int *work_done)
3866 {
3867 	if (skb->len < RX_SKB_MIN_LEN)
3868 		return 0;
3869 
3870 	/* Extract frames from the URB buffer and pass each one to
3871 	 * the stack in a new NAPI SKB.
3872 	 */
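	/* Each frame in the buffer starts with three little-endian
	 * command words (32-bit rx_cmd_a, 32-bit rx_cmd_b, 16-bit
	 * rx_cmd_c), followed by the packet data including the FCS,
	 * then padding up to the next 4-byte boundary.
	 */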
3873 	while (skb->len > 0) {
3874 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3875 		u16 rx_cmd_c;
3876 		unsigned char *packet;
3877 
3878 		rx_cmd_a = get_unaligned_le32(skb->data);
3879 		skb_pull(skb, sizeof(rx_cmd_a));
3880 
3881 		rx_cmd_b = get_unaligned_le32(skb->data);
3882 		skb_pull(skb, sizeof(rx_cmd_b));
3883 
3884 		rx_cmd_c = get_unaligned_le16(skb->data);
3885 		skb_pull(skb, sizeof(rx_cmd_c));
3886 
3887 		packet = skb->data;
3888 
3889 		/* get the packet length */
3890 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3891 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
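		/* e.g. a 60-byte frame: (60 + RXW_PADDING) % 4 = 2, so
		 * align_count = 2 padding bytes precede the next frame.
		 */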
3892 
3893 		if (unlikely(size > skb->len)) {
3894 			netif_dbg(dev, rx_err, dev->net,
3895 				  "size err rx_cmd_a=0x%08x\n",
3896 				  rx_cmd_a);
3897 			return 0;
3898 		}
3899 
3900 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3901 			netif_dbg(dev, rx_err, dev->net,
3902 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3903 		} else {
3904 			u32 frame_len;
3905 			struct sk_buff *skb2;
3906 
3907 			if (unlikely(size < ETH_FCS_LEN)) {
3908 				netif_dbg(dev, rx_err, dev->net,
3909 					  "size err rx_cmd_a=0x%08x\n",
3910 					  rx_cmd_a);
3911 				return 0;
3912 			}
3913 
3914 			frame_len = size - ETH_FCS_LEN;
3915 
3916 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3917 			if (!skb2)
3918 				return 0;
3919 
3920 			memcpy(skb2->data, packet, frame_len);
3921 
3922 			skb_put(skb2, frame_len);
3923 
3924 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3925 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3926 
3927 			/* Processing of the URB buffer must complete once
3928 			 * it has started. If the NAPI work budget is exhausted
3929 			 * while frames remain, they are added to the overflow
3930 			 * queue for delivery in the next NAPI polling cycle.
3931 			 */
3932 			if (*work_done < budget) {
3933 				lan78xx_skb_return(dev, skb2);
3934 				++(*work_done);
3935 			} else {
3936 				skb_queue_tail(&dev->rxq_overflow, skb2);
3937 			}
3938 		}
3939 
3940 		skb_pull(skb, size);
3941 
3942 		/* skip padding bytes before the next frame starts */
3943 		if (skb->len)
3944 			skb_pull(skb, align_count);
3945 	}
3946 
3947 	return 1;
3948 }
3949 
3950 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3951 			      int budget, int *work_done)
3952 {
3953 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3954 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3955 		dev->net->stats.rx_errors++;
3956 	}
3957 }
3958 
3959 static void rx_complete(struct urb *urb)
3960 {
3961 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3962 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3963 	struct lan78xx_net *dev = entry->dev;
3964 	int urb_status = urb->status;
3965 	enum skb_state state;
3966 
3967 	netif_dbg(dev, rx_status, dev->net,
3968 		  "rx done: status %d", urb->status);
3969 
3970 	skb_put(skb, urb->actual_length);
3971 	state = rx_done;
3972 
3973 	if (urb != entry->urb)
3974 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
3975 
3976 	switch (urb_status) {
3977 	case 0:
3978 		if (skb->len < RX_SKB_MIN_LEN) {
3979 			state = rx_cleanup;
3980 			dev->net->stats.rx_errors++;
3981 			dev->net->stats.rx_length_errors++;
3982 			netif_dbg(dev, rx_err, dev->net,
3983 				  "rx length %d\n", skb->len);
3984 		}
3985 		usb_mark_last_busy(dev->udev);
3986 		break;
3987 	case -EPIPE:
3988 		dev->net->stats.rx_errors++;
3989 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3990 		fallthrough;
3991 	case -ECONNRESET:				/* async unlink */
3992 	case -ESHUTDOWN:				/* hardware gone */
3993 		netif_dbg(dev, ifdown, dev->net,
3994 			  "rx shutdown, code %d\n", urb_status);
3995 		state = rx_cleanup;
3996 		break;
3997 	case -EPROTO:
3998 	case -ETIME:
3999 	case -EILSEQ:
4000 		dev->net->stats.rx_errors++;
4001 		state = rx_cleanup;
4002 		break;
4003 
4004 	/* data overrun ... flush fifo? */
4005 	case -EOVERFLOW:
4006 		dev->net->stats.rx_over_errors++;
4007 		fallthrough;
4008 
4009 	default:
4010 		state = rx_cleanup;
4011 		dev->net->stats.rx_errors++;
4012 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
4013 		break;
4014 	}
4015 
4016 	state = defer_bh(dev, skb, &dev->rxq, state);
4017 }
4018 
4019 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
4020 {
4021 	struct skb_data	*entry = (struct skb_data *)skb->cb;
4022 	size_t size = dev->rx_urb_size;
4023 	struct urb *urb = entry->urb;
4024 	unsigned long lockflags;
4025 	int ret = 0;
4026 
4027 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
4028 			  skb->data, size, rx_complete, skb);
4029 
4030 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
4031 
4032 	if (netif_device_present(dev->net) &&
4033 	    netif_running(dev->net) &&
4034 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
4035 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4036 		ret = usb_submit_urb(urb, flags);
4037 		switch (ret) {
4038 		case 0:
4039 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
4040 			break;
4041 		case -EPIPE:
4042 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
4043 			break;
4044 		case -ENODEV:
4045 		case -ENOENT:
4046 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
4047 			netif_device_detach(dev->net);
4048 			break;
4049 		case -EHOSTUNREACH:
4050 			ret = -ENOLINK;
4051 			napi_schedule(&dev->napi);
4052 			break;
4053 		default:
4054 			netif_dbg(dev, rx_err, dev->net,
4055 				  "rx submit, %d\n", ret);
4056 			napi_schedule(&dev->napi);
4057 			break;
4058 		}
4059 	} else {
4060 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
4061 		ret = -ENOLINK;
4062 	}
4063 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
4064 
4065 	if (ret)
4066 		lan78xx_release_rx_buf(dev, skb);
4067 
4068 	return ret;
4069 }
4070 
4071 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4072 {
4073 	struct sk_buff *rx_buf;
4074 
4075 	/* Ensure the maximum number of Rx URBs is submitted
4076 	 */
4077 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4078 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4079 			break;
4080 	}
4081 }
4082 
4083 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
4084 				    struct sk_buff *rx_buf)
4085 {
4086 	/* reset SKB data pointers */
4088 	rx_buf->data = rx_buf->head;
4089 	skb_reset_tail_pointer(rx_buf);
4090 	rx_buf->len = 0;
4091 	rx_buf->data_len = 0;
4092 
4093 	rx_submit(dev, rx_buf, GFP_ATOMIC);
4094 }
4095 
4096 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
4097 {
4098 	u32 tx_cmd_a;
4099 	u32 tx_cmd_b;
4100 
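	/* Each frame is prefixed by two little-endian command words:
	 * tx_cmd_a carries the frame length and FCS/checksum/LSO/VLAN
	 * flags, tx_cmd_b carries the MSS and the VLAN tag when present.
	 */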
4101 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
4102 
4103 	if (skb->ip_summed == CHECKSUM_PARTIAL)
4104 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
4105 
4106 	tx_cmd_b = 0;
4107 	if (skb_is_gso(skb)) {
4108 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
4109 
4110 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
4111 
4112 		tx_cmd_a |= TX_CMD_A_LSO_;
4113 	}
4114 
4115 	if (skb_vlan_tag_present(skb)) {
4116 		tx_cmd_a |= TX_CMD_A_IVTG_;
4117 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
4118 	}
4119 
4120 	put_unaligned_le32(tx_cmd_a, buffer);
4121 	put_unaligned_le32(tx_cmd_b, buffer + 4);
4122 }
4123 
4124 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
4125 					    struct sk_buff *tx_buf)
4126 {
4127 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
4128 	int remain = dev->tx_urb_size;
4129 	u8 *tx_data = tx_buf->data;
4130 	u32 urb_len = 0;
4131 
4132 	entry->num_of_packet = 0;
4133 	entry->length = 0;
4134 
4135 	/* Work through the pending SKBs and copy the data of each SKB into
4136 	 * the URB buffer if there is room for all the SKB data.
4137 	 *
4138 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
4139 	 */
4140 	while (remain >= TX_SKB_MIN_LEN) {
4141 		unsigned int pending_bytes;
4142 		unsigned int align_bytes;
4143 		struct sk_buff *skb;
4144 		unsigned int len;
4145 
4146 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
4147 
4148 		if (!skb)
4149 			break;
4150 
4151 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
4152 			      TX_ALIGNMENT;
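		/* e.g. urb_len = 70: 70 % TX_ALIGNMENT = 2, so 2 padding
		 * bytes realign the command words on a 4-byte boundary.
		 */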
4153 		len = align_bytes + TX_CMD_LEN + skb->len;
4154 		if (len > remain) {
4155 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
4156 			break;
4157 		}
4158 
4159 		tx_data += align_bytes;
4160 
4161 		lan78xx_fill_tx_cmd_words(skb, tx_data);
4162 		tx_data += TX_CMD_LEN;
4163 
4164 		len = skb->len;
4165 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
4166 			struct net_device_stats *stats = &dev->net->stats;
4167 
4168 			stats->tx_dropped++;
4169 			dev_kfree_skb_any(skb);
4170 			tx_data -= TX_CMD_LEN;
4171 			continue;
4172 		}
4173 
4174 		tx_data += len;
4175 		entry->length += len;
4176 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
4177 
4178 		dev_kfree_skb_any(skb);
4179 
4180 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
4181 
4182 		remain = dev->tx_urb_size - urb_len;
4183 	}
4184 
4185 	skb_put(tx_buf, urb_len);
4186 
4187 	return entry;
4188 }
4189 
4190 static void lan78xx_tx_bh(struct lan78xx_net *dev)
4191 {
4192 	int ret;
4193 
4194 	/* Start the stack Tx queue if it was stopped
4195 	 */
4196 	netif_tx_lock(dev->net);
4197 	if (netif_queue_stopped(dev->net)) {
4198 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
4199 			netif_wake_queue(dev->net);
4200 	}
4201 	netif_tx_unlock(dev->net);
4202 
4203 	/* Go through the Tx pending queue and set up URBs to transfer
4204 	 * the data to the device. Stop if no more pending data or URBs,
4205 	 * or if an error occurs when a URB is submitted.
4206 	 */
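	/* If the runtime-PM wakeup below fails, the error path jumps
	 * straight to the accounting at the bottom of the loop so the
	 * filled but unsubmitted URB buffer is reclaimed.
	 */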
4207 	do {
4208 		struct skb_data *entry;
4209 		struct sk_buff *tx_buf;
4210 		unsigned long flags;
4211 
4212 		if (skb_queue_empty(&dev->txq_pend))
4213 			break;
4214 
4215 		tx_buf = lan78xx_get_tx_buf(dev);
4216 		if (!tx_buf)
4217 			break;
4218 
4219 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
4220 
4221 		spin_lock_irqsave(&dev->txq.lock, flags);
4222 		ret = usb_autopm_get_interface_async(dev->intf);
4223 		if (ret < 0) {
4224 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4225 			goto out;
4226 		}
4227 
4228 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
4229 				  tx_buf->data, tx_buf->len, tx_complete,
4230 				  tx_buf);
4231 
4232 		if (tx_buf->len % dev->maxpacket == 0) {
4233 			/* terminate with a zero-length packet (URB_ZERO_PACKET) */
4234 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
4235 		}
4236 
4237 #ifdef CONFIG_PM
4238 		/* if device is asleep stop outgoing packet processing */
4239 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4240 			usb_anchor_urb(entry->urb, &dev->deferred);
4241 			netif_stop_queue(dev->net);
4242 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4243 			netdev_dbg(dev->net,
4244 				   "Delaying transmission for resumption\n");
4245 			return;
4246 		}
4247 #endif
4248 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
4249 		switch (ret) {
4250 		case 0:
4251 			netif_trans_update(dev->net);
4252 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
4253 			break;
4254 		case -EPIPE:
4255 			netif_stop_queue(dev->net);
4256 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4257 			usb_autopm_put_interface_async(dev->intf);
4258 			break;
4259 		case -ENODEV:
4260 		case -ENOENT:
4261 			netif_dbg(dev, tx_err, dev->net,
4262 				  "tx submit urb err %d (disconnected?)", ret);
4263 			netif_device_detach(dev->net);
4264 			break;
4265 		default:
4266 			usb_autopm_put_interface_async(dev->intf);
4267 			netif_dbg(dev, tx_err, dev->net,
4268 				  "tx submit urb err %d\n", ret);
4269 			break;
4270 		}
4271 
4272 		spin_unlock_irqrestore(&dev->txq.lock, flags);
4273 
4274 		if (ret) {
4275 			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
4276 out:
4277 			dev->net->stats.tx_dropped += entry->num_of_packet;
4278 			lan78xx_release_tx_buf(dev, tx_buf);
4279 		}
4280 	} while (ret == 0);
4281 }
4282 
4283 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
4284 {
4285 	struct sk_buff_head done;
4286 	struct sk_buff *rx_buf;
4287 	struct skb_data *entry;
4288 	unsigned long flags;
4289 	int work_done = 0;
4290 
4291 	/* Pass frames received in the last NAPI cycle before
4292 	 * working on newly completed URBs.
4293 	 */
4294 	while (!skb_queue_empty(&dev->rxq_overflow)) {
4295 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
4296 		++work_done;
4297 	}
4298 
4299 	/* Take a snapshot of the done queue and move items to a
4300 	 * temporary queue. Rx URB completions will continue to add
4301 	 * to the done queue.
4302 	 */
4303 	__skb_queue_head_init(&done);
4304 
4305 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4306 	skb_queue_splice_init(&dev->rxq_done, &done);
4307 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4308 
4309 	/* Extract receive frames from completed URBs and
4310 	 * pass them to the stack. Re-submit each completed URB.
4311 	 */
4312 	while ((work_done < budget) &&
4313 	       (rx_buf = __skb_dequeue(&done))) {
4314 		entry = (struct skb_data *)(rx_buf->cb);
4315 		switch (entry->state) {
4316 		case rx_done:
4317 			rx_process(dev, rx_buf, budget, &work_done);
4318 			break;
4319 		case rx_cleanup:
4320 			break;
4321 		default:
4322 			netdev_dbg(dev->net, "rx buf state %d\n",
4323 				   entry->state);
4324 			break;
4325 		}
4326 
4327 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4328 	}
4329 
4330 	/* If budget was consumed before processing all the URBs put them
4331 	 * back on the front of the done queue. They will be first to be
4332 	 * processed in the next NAPI cycle.
4333 	 */
4334 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4335 	skb_queue_splice(&done, &dev->rxq_done);
4336 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4337 
4338 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4339 		/* reset update timer delta */
4340 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4341 			dev->delta = 1;
4342 			mod_timer(&dev->stat_monitor,
4343 				  jiffies + STAT_UPDATE_TIMER);
4344 		}
4345 
4346 		/* Submit all free Rx URBs */
4347 
4348 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4349 			lan78xx_rx_urb_submit_all(dev);
4350 
4351 		/* Submit new Tx URBs */
4352 
4353 		lan78xx_tx_bh(dev);
4354 	}
4355 
4356 	return work_done;
4357 }
4358 
4359 static int lan78xx_poll(struct napi_struct *napi, int budget)
4360 {
4361 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4362 	int result = budget;
4363 	int work_done;
4364 
4365 	/* Don't do any work if the device is suspended */
4366 
4367 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4368 		napi_complete_done(napi, 0);
4369 		return 0;
4370 	}
4371 
4372 	/* Process completed URBs and submit new URBs */
4373 
4374 	work_done = lan78xx_bh(dev, budget);
4375 
4376 	if (work_done < budget) {
4377 		napi_complete_done(napi, work_done);
4378 
4379 		/* Start a new polling cycle if data was received or
4380 		 * data is waiting to be transmitted.
4381 		 */
4382 		if (!skb_queue_empty(&dev->rxq_done)) {
4383 			napi_schedule(napi);
4384 		} else if (netif_carrier_ok(dev->net)) {
4385 			if (skb_queue_empty(&dev->txq) &&
4386 			    !skb_queue_empty(&dev->txq_pend)) {
4387 				napi_schedule(napi);
4388 			} else {
4389 				netif_tx_lock(dev->net);
4390 				if (netif_queue_stopped(dev->net)) {
4391 					netif_wake_queue(dev->net);
4392 					napi_schedule(napi);
4393 				}
4394 				netif_tx_unlock(dev->net);
4395 			}
4396 		}
4397 		result = work_done;
4398 	}
4399 
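	/* Returning the full budget when napi_complete_done() was not
	 * called tells NAPI to reschedule this handler.
	 */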
4400 	return result;
4401 }
4402 
4403 static void lan78xx_delayedwork(struct work_struct *work)
4404 {
4405 	int status;
4406 	struct lan78xx_net *dev;
4407 
4408 	dev = container_of(work, struct lan78xx_net, wq.work);
4409 
4410 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4411 		return;
4412 
4413 	if (usb_autopm_get_interface(dev->intf) < 0)
4414 		return;
4415 
4416 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4417 		unlink_urbs(dev, &dev->txq);
4418 
4419 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4420 		if (status < 0 &&
4421 		    status != -EPIPE &&
4422 		    status != -ESHUTDOWN) {
4423 			if (netif_msg_tx_err(dev))
4424 				netdev_err(dev->net,
4425 					   "can't clear tx halt, status %d\n",
4426 					   status);
4427 		} else {
4428 			clear_bit(EVENT_TX_HALT, &dev->flags);
4429 			if (status != -ESHUTDOWN)
4430 				netif_wake_queue(dev->net);
4431 		}
4432 	}
4433 
4434 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4435 		unlink_urbs(dev, &dev->rxq);
4436 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4437 		if (status < 0 &&
4438 		    status != -EPIPE &&
4439 		    status != -ESHUTDOWN) {
4440 			if (netif_msg_rx_err(dev))
4441 				netdev_err(dev->net,
4442 					   "can't clear rx halt, status %d\n",
4443 					   status);
4444 		} else {
4445 			clear_bit(EVENT_RX_HALT, &dev->flags);
4446 			napi_schedule(&dev->napi);
4447 		}
4448 	}
4449 
4450 	if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) {
4451 		int ret = 0;
4452 
4453 		clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
4454 		ret = lan78xx_phy_int_ack(dev);
4455 		if (ret)
4456 			netdev_info(dev->net, "PHY INT ack failed (%pe)\n",
4457 				    ERR_PTR(ret));
4458 	}
4459 
4460 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4461 		lan78xx_update_stats(dev);
4462 
4463 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4464 
4465 		mod_timer(&dev->stat_monitor,
4466 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4467 
4468 		dev->delta = min((dev->delta * 2), 50);
4469 	}
4470 
4471 	usb_autopm_put_interface(dev->intf);
4472 }
4473 
4474 static void intr_complete(struct urb *urb)
4475 {
4476 	struct lan78xx_net *dev = urb->context;
4477 	int status = urb->status;
4478 
4479 	switch (status) {
4480 	/* success */
4481 	case 0:
4482 		lan78xx_status(dev, urb);
4483 		break;
4484 
4485 	/* software-driven interface shutdown */
4486 	case -ENOENT:			/* urb killed */
4487 	case -ENODEV:			/* hardware gone */
4488 	case -ESHUTDOWN:		/* hardware gone */
4489 		netif_dbg(dev, ifdown, dev->net,
4490 			  "intr shutdown, code %d\n", status);
4491 		return;
4492 
4493 	/* NOTE:  not throttling like RX/TX, since this endpoint
4494 	 * already polls infrequently
4495 	 */
4496 	default:
4497 		netdev_dbg(dev->net, "intr status %d\n", status);
4498 		break;
4499 	}
4500 
4501 	if (!netif_device_present(dev->net) ||
4502 	    !netif_running(dev->net)) {
4503 		netdev_warn(dev->net, "not submitting new status URB");
4504 		return;
4505 	}
4506 
4507 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4508 	status = usb_submit_urb(urb, GFP_ATOMIC);
4509 
4510 	switch (status) {
4511 	case  0:
4512 		break;
4513 	case -ENODEV:
4514 	case -ENOENT:
4515 		netif_dbg(dev, timer, dev->net,
4516 			  "intr resubmit %d (disconnect?)", status);
4517 		netif_device_detach(dev->net);
4518 		break;
4519 	default:
4520 		netif_err(dev, timer, dev->net,
4521 			  "intr resubmit --> %d\n", status);
4522 		break;
4523 	}
4524 }
4525 
4526 static void lan78xx_disconnect(struct usb_interface *intf)
4527 {
4528 	struct lan78xx_net *dev;
4529 	struct usb_device *udev;
4530 	struct net_device *net;
4531 
4532 	dev = usb_get_intfdata(intf);
4533 	usb_set_intfdata(intf, NULL);
4534 	if (!dev)
4535 		return;
4536 
4537 	udev = interface_to_usbdev(intf);
4538 	net = dev->net;
4539 
4540 	rtnl_lock();
4541 	phylink_stop(dev->phylink);
4542 	phylink_disconnect_phy(dev->phylink);
4543 	rtnl_unlock();
4544 
4545 	netif_napi_del(&dev->napi);
4546 
4547 	unregister_netdev(net);
4548 
4549 	timer_shutdown_sync(&dev->stat_monitor);
4550 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4551 	cancel_delayed_work_sync(&dev->wq);
4552 
4553 	phylink_destroy(dev->phylink);
4554 
4555 	usb_scuttle_anchored_urbs(&dev->deferred);
4556 
4557 	lan78xx_unbind(dev, intf);
4558 
4559 	lan78xx_free_tx_resources(dev);
4560 	lan78xx_free_rx_resources(dev);
4561 
4562 	usb_kill_urb(dev->urb_intr);
4563 	usb_free_urb(dev->urb_intr);
4564 
4565 	free_netdev(net);
4566 	usb_put_dev(udev);
4567 }
4568 
4569 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4570 {
4571 	struct lan78xx_net *dev = netdev_priv(net);
4572 
4573 	unlink_urbs(dev, &dev->txq);
4574 	napi_schedule(&dev->napi);
4575 }
4576 
4577 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4578 						struct net_device *netdev,
4579 						netdev_features_t features)
4580 {
4581 	struct lan78xx_net *dev = netdev_priv(netdev);
4582 
4583 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4584 		features &= ~NETIF_F_GSO_MASK;
4585 
4586 	features = vlan_features_check(skb, features);
4587 	features = vxlan_features_check(skb, features);
4588 
4589 	return features;
4590 }
4591 
4592 static const struct net_device_ops lan78xx_netdev_ops = {
4593 	.ndo_open		= lan78xx_open,
4594 	.ndo_stop		= lan78xx_stop,
4595 	.ndo_start_xmit		= lan78xx_start_xmit,
4596 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4597 	.ndo_change_mtu		= lan78xx_change_mtu,
4598 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4599 	.ndo_validate_addr	= eth_validate_addr,
4600 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4601 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4602 	.ndo_set_features	= lan78xx_set_features,
4603 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4604 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4605 	.ndo_features_check	= lan78xx_features_check,
4606 };
4607 
4608 static void lan78xx_stat_monitor(struct timer_list *t)
4609 {
4610 	struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);
4611 
4612 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4613 }
4614 
4615 static int lan78xx_probe(struct usb_interface *intf,
4616 			 const struct usb_device_id *id)
4617 {
4618 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4619 	struct lan78xx_net *dev;
4620 	struct net_device *netdev;
4621 	struct usb_device *udev;
4622 	int ret;
4623 	unsigned int maxp;
4624 	unsigned int period;
4625 	u8 *buf = NULL;
4626 
4627 	udev = interface_to_usbdev(intf);
4628 	udev = usb_get_dev(udev);
4629 
4630 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4631 	if (!netdev) {
4632 		dev_err(&intf->dev, "Error: OOM\n");
4633 		ret = -ENOMEM;
4634 		goto out1;
4635 	}
4636 
4637 	SET_NETDEV_DEV(netdev, &intf->dev);
4638 
4639 	dev = netdev_priv(netdev);
4640 	dev->udev = udev;
4641 	dev->intf = intf;
4642 	dev->net = netdev;
4643 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4644 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4645 
4646 	skb_queue_head_init(&dev->rxq);
4647 	skb_queue_head_init(&dev->txq);
4648 	skb_queue_head_init(&dev->rxq_done);
4649 	skb_queue_head_init(&dev->txq_pend);
4650 	skb_queue_head_init(&dev->rxq_overflow);
4651 	mutex_init(&dev->mdiobus_mutex);
4652 	mutex_init(&dev->dev_mutex);
4653 
4654 	ret = lan78xx_urb_config_init(dev);
4655 	if (ret < 0)
4656 		goto out2;
4657 
4658 	ret = lan78xx_alloc_tx_resources(dev);
4659 	if (ret < 0)
4660 		goto out2;
4661 
4662 	ret = lan78xx_alloc_rx_resources(dev);
4663 	if (ret < 0)
4664 		goto out3;
4665 
4666 	/* MTU range: 68 - 9000 */
4667 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4668 
4669 	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4670 
4671 	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4672 
4673 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4674 	init_usb_anchor(&dev->deferred);
4675 
4676 	netdev->netdev_ops = &lan78xx_netdev_ops;
4677 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4678 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4679 
4680 	dev->delta = 1;
4681 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4682 
4683 	mutex_init(&dev->stats.access_lock);
4684 
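	/* Validate the three expected endpoints (bulk IN, bulk OUT and
	 * interrupt IN) before deriving pipes from them.
	 */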
4685 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4686 		ret = -ENODEV;
4687 		goto out4;
4688 	}
4689 
4690 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4691 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4692 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4693 		ret = -ENODEV;
4694 		goto out4;
4695 	}
4696 
4697 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4698 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4699 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4700 		ret = -ENODEV;
4701 		goto out4;
4702 	}
4703 
4704 	ep_intr = &intf->cur_altsetting->endpoint[2];
4705 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4706 		ret = -ENODEV;
4707 		goto out4;
4708 	}
4709 
4710 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4711 					usb_endpoint_num(&ep_intr->desc));
4712 
4713 	ret = lan78xx_bind(dev, intf);
4714 	if (ret < 0)
4715 		goto out4;
4716 
4717 	period = ep_intr->desc.bInterval;
4718 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4719 
4720 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4721 	if (!dev->urb_intr) {
4722 		ret = -ENOMEM;
4723 		goto out5;
4724 	}
4725 
4726 	buf = kmalloc(maxp, GFP_KERNEL);
4727 	if (!buf) {
4728 		ret = -ENOMEM;
4729 		goto free_urbs;
4730 	}
4731 
4732 	usb_fill_int_urb(dev->urb_intr, dev->udev,
4733 			 dev->pipe_intr, buf, maxp,
4734 			 intr_complete, dev, period);
4735 	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4736 
4737 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4738 
4739 	/* Reject broken descriptors. */
4740 	if (dev->maxpacket == 0) {
4741 		ret = -ENODEV;
4742 		goto free_urbs;
4743 	}
4744 
4745 	/* driver requires remote-wakeup capability during autosuspend. */
4746 	intf->needs_remote_wakeup = 1;
4747 
4748 	ret = lan78xx_phy_init(dev);
4749 	if (ret < 0)
4750 		goto free_urbs;
4751 
4752 	ret = register_netdev(netdev);
4753 	if (ret != 0) {
4754 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4755 		goto phy_uninit;
4756 	}
4757 
4758 	usb_set_intfdata(intf, dev);
4759 
4760 	ret = device_set_wakeup_enable(&udev->dev, true);
4761 
4762 	/* The USB core's default autosuspend delay of 2 seconds adds
4763 	 * more overhead than benefit; use 10 seconds instead.
4764 	 */
4765 	pm_runtime_set_autosuspend_delay(&udev->dev,
4766 					 DEFAULT_AUTOSUSPEND_DELAY);
4767 
4768 	return 0;
4769 
4770 phy_uninit:
4771 	lan78xx_phy_uninit(dev);
4772 free_urbs:
4773 	usb_free_urb(dev->urb_intr);
4774 out5:
4775 	lan78xx_unbind(dev, intf);
4776 out4:
4777 	netif_napi_del(&dev->napi);
4778 	lan78xx_free_rx_resources(dev);
4779 out3:
4780 	lan78xx_free_tx_resources(dev);
4781 out2:
4782 	free_netdev(netdev);
4783 out1:
4784 	usb_put_dev(udev);
4785 
4786 	return ret;
4787 }
4788 
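/* Bit-serial CRC-16 (polynomial 0x8005, initial value 0xFFFF, data
 * consumed LSB first) over the bytes of a wake frame filter pattern.
 * The result is programmed into WUF_CFG so that, presumably, the
 * controller can match it against the CRC it computes over the bytes
 * selected by the WUF_MASK registers.
 */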
4789 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4790 {
4791 	const u16 crc16poly = 0x8005;
4792 	int i;
4793 	u16 bit, crc, msb;
4794 	u8 data;
4795 
4796 	crc = 0xFFFF;
4797 	for (i = 0; i < len; i++) {
4798 		data = *buf++;
4799 		for (bit = 0; bit < 8; bit++) {
4800 			msb = crc >> 15;
4801 			crc <<= 1;
4802 
4803 			if (msb ^ (u16)(data & 1)) {
4804 				crc ^= crc16poly;
4805 				crc |= (u16)0x0001U;
4806 			}
4807 			data >>= 1;
4808 		}
4809 	}
4810 
4811 	return crc;
4812 }
4813 
4814 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4815 {
4816 	u32 buf;
4817 	int ret;
4818 
4819 	ret = lan78xx_stop_tx_path(dev);
4820 	if (ret < 0)
4821 		return ret;
4822 
4823 	ret = lan78xx_stop_rx_path(dev);
4824 	if (ret < 0)
4825 		return ret;
4826 
4827 	/* auto suspend (selective suspend) */
4828 
4829 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4830 	if (ret < 0)
4831 		return ret;
4832 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4833 	if (ret < 0)
4834 		return ret;
4835 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4836 	if (ret < 0)
4837 		return ret;
4838 
4839 	/* set goodframe wakeup */
4840 
4841 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4842 	if (ret < 0)
4843 		return ret;
4844 
4845 	buf |= WUCSR_RFE_WAKE_EN_;
4846 	buf |= WUCSR_STORE_WAKE_;
4847 
4848 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4849 	if (ret < 0)
4850 		return ret;
4851 
4852 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4853 	if (ret < 0)
4854 		return ret;
4855 
4856 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4857 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4858 	buf |= PMT_CTL_PHY_WAKE_EN_;
4859 	buf |= PMT_CTL_WOL_EN_;
4860 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4861 	buf |= PMT_CTL_SUS_MODE_3_;
4862 
4863 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4864 	if (ret < 0)
4865 		return ret;
4866 
4867 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4868 	if (ret < 0)
4869 		return ret;
4870 
4871 	buf |= PMT_CTL_WUPS_MASK_;
4872 
4873 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4874 	if (ret < 0)
4875 		return ret;
4876 
4877 	ret = lan78xx_start_rx_path(dev);
4878 
4879 	return ret;
4880 }
4881 
4882 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4883 {
4884 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4885 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4886 	const u8 arp_type[2] = { 0x08, 0x06 };
4887 	u32 temp_pmt_ctl;
4888 	int mask_index;
4889 	u32 temp_wucsr;
4890 	u32 buf;
4891 	u16 crc;
4892 	int ret;
4893 
4894 	ret = lan78xx_stop_tx_path(dev);
4895 	if (ret < 0)
4896 		return ret;
4897 	ret = lan78xx_stop_rx_path(dev);
4898 	if (ret < 0)
4899 		return ret;
4900 
4901 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4902 	if (ret < 0)
4903 		return ret;
4904 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4905 	if (ret < 0)
4906 		return ret;
4907 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4908 	if (ret < 0)
4909 		return ret;
4910 
4911 	temp_wucsr = 0;
4912 
4913 	temp_pmt_ctl = 0;
4914 
4915 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4916 	if (ret < 0)
4917 		return ret;
4918 
4919 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4920 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4921 
4922 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4923 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4924 		if (ret < 0)
4925 			return ret;
4926 	}
4927 
4928 	mask_index = 0;
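	/* Each wake-up filter pairs a byte-select mask with the CRC-16
	 * of the selected bytes: the masks programmed below select the
	 * multicast address prefixes (01:00:5E and 33:33) and, for ARP,
	 * the EtherType bytes at offsets 12-13.
	 */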
4929 	if (wol & WAKE_PHY) {
4930 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4931 
4932 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4933 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4934 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4935 	}
4936 	if (wol & WAKE_MAGIC) {
4937 		temp_wucsr |= WUCSR_MPEN_;
4938 
4939 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4940 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4941 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4942 	}
4943 	if (wol & WAKE_BCAST) {
4944 		temp_wucsr |= WUCSR_BCST_EN_;
4945 
4946 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4947 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4948 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4949 	}
4950 	if (wol & WAKE_MCAST) {
4951 		temp_wucsr |= WUCSR_WAKE_EN_;
4952 
4953 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4954 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4955 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4956 					WUF_CFGX_EN_ |
4957 					WUF_CFGX_TYPE_MCAST_ |
4958 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4959 					(crc & WUF_CFGX_CRC16_MASK_));
4960 		if (ret < 0)
4961 			return ret;
4962 
4963 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4964 		if (ret < 0)
4965 			return ret;
4966 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4967 		if (ret < 0)
4968 			return ret;
4969 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4970 		if (ret < 0)
4971 			return ret;
4972 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4973 		if (ret < 0)
4974 			return ret;
4975 
4976 		mask_index++;
4977 
4978 		/* for IPv6 Multicast */
4979 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4980 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4981 					WUF_CFGX_EN_ |
4982 					WUF_CFGX_TYPE_MCAST_ |
4983 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4984 					(crc & WUF_CFGX_CRC16_MASK_));
4985 		if (ret < 0)
4986 			return ret;
4987 
4988 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4989 		if (ret < 0)
4990 			return ret;
4991 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4992 		if (ret < 0)
4993 			return ret;
4994 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4995 		if (ret < 0)
4996 			return ret;
4997 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4998 		if (ret < 0)
4999 			return ret;
5000 
5001 		mask_index++;
5002 
5003 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5004 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5005 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5006 	}
5007 	if (wol & WAKE_UCAST) {
5008 		temp_wucsr |= WUCSR_PFDA_EN_;
5009 
5010 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5011 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5012 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5013 	}
5014 	if (wol & WAKE_ARP) {
5015 		temp_wucsr |= WUCSR_WAKE_EN_;
5016 
5017 		/* set WUF_CFG & WUF_MASK
5018 		 * for packet type (EtherType at offsets 12-13) = ARP (0x0806)
5019 		 */
5020 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
5021 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
5022 					WUF_CFGX_EN_ |
5023 					WUF_CFGX_TYPE_ALL_ |
5024 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
5025 					(crc & WUF_CFGX_CRC16_MASK_));
5026 		if (ret < 0)
5027 			return ret;
5028 
5029 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
5030 		if (ret < 0)
5031 			return ret;
5032 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
5033 		if (ret < 0)
5034 			return ret;
5035 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
5036 		if (ret < 0)
5037 			return ret;
5038 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
5039 		if (ret < 0)
5040 			return ret;
5041 
5042 		mask_index++;
5043 
5044 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5045 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5046 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5047 	}
5048 
5049 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
5050 	if (ret < 0)
5051 		return ret;
5052 
5053 	/* when multiple WOL bits are set, fall back to suspend mode 0 */
5054 	if (hweight_long((unsigned long)wol) > 1) {
5055 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5056 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5057 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5058 	}
5059 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
5060 	if (ret < 0)
5061 		return ret;
5062 
5063 	/* clear WUPS */
5064 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5065 	if (ret < 0)
5066 		return ret;
5067 
5068 	buf |= PMT_CTL_WUPS_MASK_;
5069 
5070 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5071 	if (ret < 0)
5072 		return ret;
5073 
5074 	ret = lan78xx_start_rx_path(dev);
5075 
5076 	return ret;
5077 }
5078 
5079 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
5080 {
5081 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5082 	bool dev_open;
5083 	int ret;
5084 
5085 	mutex_lock(&dev->dev_mutex);
5086 
5087 	netif_dbg(dev, ifdown, dev->net,
5088 		  "suspending: pm event %#x", message.event);
5089 
5090 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5091 
5092 	if (dev_open) {
5093 		spin_lock_irq(&dev->txq.lock);
5094 		/* don't autosuspend while transmitting */
5095 		if ((skb_queue_len(&dev->txq) ||
5096 		     skb_queue_len(&dev->txq_pend)) &&
5097 		    PMSG_IS_AUTO(message)) {
5098 			spin_unlock_irq(&dev->txq.lock);
5099 			ret = -EBUSY;
5100 			goto out;
5101 		} else {
5102 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
5103 			spin_unlock_irq(&dev->txq.lock);
5104 		}
5105 
5106 		rtnl_lock();
5107 		phylink_suspend(dev->phylink, false);
5108 		rtnl_unlock();
5109 
5110 		/* stop RX */
5111 		ret = lan78xx_stop_rx_path(dev);
5112 		if (ret < 0)
5113 			goto out;
5114 
5115 		ret = lan78xx_flush_rx_fifo(dev);
5116 		if (ret < 0)
5117 			goto out;
5118 
5119 		/* stop Tx */
5120 		ret = lan78xx_stop_tx_path(dev);
5121 		if (ret < 0)
5122 			goto out;
5123 
5124 		/* empty out the Rx and Tx queues */
5125 		netif_device_detach(dev->net);
5126 		lan78xx_terminate_urbs(dev);
5127 		usb_kill_urb(dev->urb_intr);
5128 
5129 		/* reattach */
5130 		netif_device_attach(dev->net);
5131 
5132 		timer_delete(&dev->stat_monitor);
5133 
5134 		if (PMSG_IS_AUTO(message)) {
5135 			ret = lan78xx_set_auto_suspend(dev);
5136 			if (ret < 0)
5137 				goto out;
5138 		} else {
5139 			struct lan78xx_priv *pdata;
5140 
5141 			pdata = (struct lan78xx_priv *)(dev->data[0]);
5142 			netif_carrier_off(dev->net);
5143 			ret = lan78xx_set_suspend(dev, pdata->wol);
5144 			if (ret < 0)
5145 				goto out;
5146 		}
5147 	} else {
5148 		/* Interface is down; don't allow WOL and PHY
5149 		 * events to wake up the host
5150 		 */
5151 		u32 buf;
5152 
5153 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
5154 
5155 		ret = lan78xx_write_reg(dev, WUCSR, 0);
5156 		if (ret < 0)
5157 			goto out;
5158 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
5159 		if (ret < 0)
5160 			goto out;
5161 
5162 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5163 		if (ret < 0)
5164 			goto out;
5165 
5166 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
5167 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
5168 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
5169 		buf |= PMT_CTL_SUS_MODE_3_;
5170 
5171 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5172 		if (ret < 0)
5173 			goto out;
5174 
5175 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5176 		if (ret < 0)
5177 			goto out;
5178 
5179 		buf |= PMT_CTL_WUPS_MASK_;
5180 
5181 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5182 		if (ret < 0)
5183 			goto out;
5184 	}
5185 
5186 	ret = 0;
5187 out:
5188 	mutex_unlock(&dev->dev_mutex);
5189 
5190 	return ret;
5191 }
5192 
5193 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
5194 {
5195 	bool pipe_halted = false;
5196 	struct urb *urb;
5197 
5198 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5199 		struct sk_buff *skb = urb->context;
5200 		int ret;
5201 
5202 		if (!netif_device_present(dev->net) ||
5203 		    !netif_carrier_ok(dev->net) ||
5204 		    pipe_halted) {
5205 			lan78xx_release_tx_buf(dev, skb);
5206 			continue;
5207 		}
5208 
5209 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5210 
5211 		if (ret == 0) {
5212 			netif_trans_update(dev->net);
5213 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5214 		} else {
5215 			if (ret == -EPIPE) {
5216 				netif_stop_queue(dev->net);
5217 				pipe_halted = true;
5218 			} else if (ret == -ENODEV) {
5219 				netif_device_detach(dev->net);
5220 			}
5221 
5222 			lan78xx_release_tx_buf(dev, skb);
5223 		}
5224 	}
5225 
5226 	return pipe_halted;
5227 }
5228 
5229 static int lan78xx_resume(struct usb_interface *intf)
5230 {
5231 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5232 	bool dev_open;
5233 	int ret;
5234 
5235 	mutex_lock(&dev->dev_mutex);
5236 
5237 	netif_dbg(dev, ifup, dev->net, "resuming device");
5238 
5239 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5240 
5241 	if (dev_open) {
5242 		bool pipe_halted = false;
5243 
5244 		ret = lan78xx_flush_tx_fifo(dev);
5245 		if (ret < 0)
5246 			goto out;
5247 
5248 		if (dev->urb_intr) {
5249 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
5250 
5251 			if (ret < 0) {
5252 				if (ret == -ENODEV)
5253 					netif_device_detach(dev->net);
5254 				netdev_warn(dev->net, "Failed to submit intr URB");
5255 			}
5256 		}
5257 
5258 		spin_lock_irq(&dev->txq.lock);
5259 
5260 		if (netif_device_present(dev->net)) {
5261 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
5262 
5263 			if (pipe_halted)
5264 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5265 		}
5266 
5267 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5268 
5269 		spin_unlock_irq(&dev->txq.lock);
5270 
5271 		if (!pipe_halted &&
5272 		    netif_device_present(dev->net) &&
5273 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5274 			netif_start_queue(dev->net);
5275 
5276 		ret = lan78xx_start_tx_path(dev);
5277 		if (ret < 0)
5278 			goto out;
5279 
5280 		napi_schedule(&dev->napi);
5281 
5282 		if (!timer_pending(&dev->stat_monitor)) {
5283 			dev->delta = 1;
5284 			mod_timer(&dev->stat_monitor,
5285 				  jiffies + STAT_UPDATE_TIMER);
5286 		}
5287 
5288 	} else {
5289 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5290 	}
5291 
5292 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
5293 	if (ret < 0)
5294 		goto out;
5295 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5296 	if (ret < 0)
5297 		goto out;
5298 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5299 	if (ret < 0)
5300 		goto out;
5301 
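	/* Write the latched wake-source status bits back to clear them
	 * (they appear to be write-one-to-clear), discarding any wake
	 * events recorded while suspended.
	 */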
5302 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5303 					     WUCSR2_ARP_RCD_ |
5304 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5305 					     WUCSR2_IPV4_TCPSYN_RCD_);
5306 	if (ret < 0)
5307 		goto out;
5308 
5309 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5310 					    WUCSR_EEE_RX_WAKE_ |
5311 					    WUCSR_PFDA_FR_ |
5312 					    WUCSR_RFE_WAKE_FR_ |
5313 					    WUCSR_WUFR_ |
5314 					    WUCSR_MPR_ |
5315 					    WUCSR_BCST_FR_);
5316 	if (ret < 0)
5317 		goto out;
5318 
5319 	ret = 0;
5320 out:
5321 	mutex_unlock(&dev->dev_mutex);
5322 
5323 	return ret;
5324 }
5325 
5326 static int lan78xx_reset_resume(struct usb_interface *intf)
5327 {
5328 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5329 	int ret;
5330 
5331 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5332 
5333 	ret = lan78xx_reset(dev);
5334 	if (ret < 0)
5335 		return ret;
5336 
5337 	ret = lan78xx_resume(intf);
5338 	if (ret < 0)
5339 		return ret;
5340 
5341 	rtnl_lock();
5342 	phylink_resume(dev->phylink);
5343 	rtnl_unlock();
5344 
5345 	return 0;
5346 }
5347 
5348 static const struct usb_device_id products[] = {
5349 	{
5350 	/* LAN7800 USB Gigabit Ethernet Device */
5351 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5352 	},
5353 	{
5354 	/* LAN7850 USB Gigabit Ethernet Device */
5355 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5356 	},
5357 	{
5358 	/* LAN7801 USB Gigabit Ethernet Device */
5359 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5360 	},
5361 	{
5362 	/* AT29M2AF USB Gigabit Ethernet Device */
5363 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5364 	},
5365 	{},
5366 };
5367 MODULE_DEVICE_TABLE(usb, products);
5368 
5369 static struct usb_driver lan78xx_driver = {
5370 	.name			= DRIVER_NAME,
5371 	.id_table		= products,
5372 	.probe			= lan78xx_probe,
5373 	.disconnect		= lan78xx_disconnect,
5374 	.suspend		= lan78xx_suspend,
5375 	.resume			= lan78xx_resume,
5376 	.reset_resume		= lan78xx_reset_resume,
5377 	.supports_autosuspend	= 1,
5378 	.disable_hub_initiated_lpm = 1,
5379 };
5380 
5381 module_usb_driver(lan78xx_driver);
5382 
5383 MODULE_AUTHOR(DRIVER_AUTHOR);
5384 MODULE_DESCRIPTION(DRIVER_DESC);
5385 MODULE_LICENSE("GPL");
5386