1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/phylink.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <net/vxlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/irq.h>
27 #include <linux/irqchip/chained_irq.h>
28 #include <linux/microchipphy.h>
29 #include <linux/phy_fixed.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include "lan78xx.h"
33 
34 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
35 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
36 #define DRIVER_NAME	"lan78xx"
37 
38 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
39 #define THROTTLE_JIFFIES		(HZ / 8)
40 #define UNLINK_TIMEOUT_MS		3
41 
42 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
43 
44 #define SS_USB_PKT_SIZE			(1024)
45 #define HS_USB_PKT_SIZE			(512)
46 #define FS_USB_PKT_SIZE			(64)
47 
48 #define MAX_RX_FIFO_SIZE		(12 * 1024)
49 #define MAX_TX_FIFO_SIZE		(12 * 1024)
50 
51 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
52 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
53 					 (FLOW_THRESHOLD(off) << 8))
54 
55 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
56 #define FLOW_ON_SS			9216
57 #define FLOW_ON_HS			8704
58 
59 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
60 #define FLOW_OFF_SS			4096
61 #define FLOW_OFF_HS			1024
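/* Worked example from the macros above: FLOW_CTRL_THRESHOLD(FLOW_ON_SS,
 * FLOW_OFF_SS) packs ceil(9216 / 512) = 18 (0x12) into bits 6:0 and
 * ceil(4096 / 512) = 8 (0x08) into bits 14:8, i.e. a threshold field
 * value of 0x0812.
 */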
62 
63 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
64 #define DEFAULT_BULK_IN_DELAY		(0x0800)
65 #define MAX_SINGLE_PACKET_SIZE		(9000)
66 #define DEFAULT_TX_CSUM_ENABLE		(true)
67 #define DEFAULT_RX_CSUM_ENABLE		(true)
68 #define DEFAULT_TSO_CSUM_ENABLE		(true)
69 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
70 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
71 #define TX_ALIGNMENT			(4)
72 #define RXW_PADDING			2
73 
74 #define LAN78XX_USB_VENDOR_ID		(0x0424)
75 #define LAN7800_USB_PRODUCT_ID		(0x7800)
76 #define LAN7850_USB_PRODUCT_ID		(0x7850)
77 #define LAN7801_USB_PRODUCT_ID		(0x7801)
78 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
79 #define LAN78XX_OTP_MAGIC		(0x78F3)
80 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
81 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
82 
83 #define	MII_READ			1
84 #define	MII_WRITE			0
85 
86 #define EEPROM_INDICATOR		(0xA5)
87 #define EEPROM_MAC_OFFSET		(0x01)
88 #define MAX_EEPROM_SIZE			512
89 #define OTP_INDICATOR_1			(0xF3)
90 #define OTP_INDICATOR_2			(0xF7)
91 
92 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
93 					 WAKE_MCAST | WAKE_BCAST | \
94 					 WAKE_ARP | WAKE_MAGIC)
95 
96 #define TX_URB_NUM			10
97 #define TX_SS_URB_NUM			TX_URB_NUM
98 #define TX_HS_URB_NUM			TX_URB_NUM
99 #define TX_FS_URB_NUM			TX_URB_NUM
100 
101 /* A single URB buffer must be large enough to hold a complete jumbo packet
102  */
103 #define TX_SS_URB_SIZE			(32 * 1024)
104 #define TX_HS_URB_SIZE			(16 * 1024)
105 #define TX_FS_URB_SIZE			(10 * 1024)
106 
107 #define RX_SS_URB_NUM			30
108 #define RX_HS_URB_NUM			10
109 #define RX_FS_URB_NUM			10
110 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
111 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
112 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
113 
114 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
115 #define SS_BULK_IN_DELAY		0x2000
116 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
117 #define HS_BULK_IN_DELAY		0x2000
118 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
119 #define FS_BULK_IN_DELAY		0x2000
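/* A single SuperSpeed bulk-in URB (RX_SS_URB_SIZE, 32 KiB) is sized to
 * hold one complete MAX_SINGLE_PACKET_SIZE jumbo frame, or roughly 21
 * full-sized 1518-byte frames per burst.
 */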
120 
121 #define TX_CMD_LEN			8
122 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
123 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
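/* e.g. with TX_SS_URB_SIZE buffers, LAN78XX_TSO_SIZE() permits TSO
 * aggregates of up to 32768 - (8 + 14) = 32746 bytes per URB.
 */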
124 
125 #define RX_CMD_LEN			10
126 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
127 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
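/* e.g. for the default 1500-byte MTU, RX_MAX_FRAME_LEN() is
 * 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) = 1518 bytes.
 */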
128 
129 /* USB related defines */
130 #define BULK_IN_PIPE			1
131 #define BULK_OUT_PIPE			2
132 
133 /* default autosuspend delay (msec) */
134 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
135 
136 /* statistics update interval (msec) */
137 #define STAT_UPDATE_TIMER		(1 * 1000)
138 
139 /* time to wait for MAC or FCT to stop (jiffies) */
140 #define HW_DISABLE_TIMEOUT		(HZ / 10)
141 
142 /* time to wait between polling MAC or FCT state (ms) */
143 #define HW_DISABLE_DELAY_MS		1
144 
145 /* defines interrupts from interrupt EP */
146 #define MAX_INT_EP			(32)
147 #define INT_EP_INTEP			(31)
148 #define INT_EP_OTP_WR_DONE		(28)
149 #define INT_EP_EEE_TX_LPI_START		(26)
150 #define INT_EP_EEE_TX_LPI_STOP		(25)
151 #define INT_EP_EEE_RX_LPI		(24)
152 #define INT_EP_MAC_RESET_TIMEOUT	(23)
153 #define INT_EP_RDFO			(22)
154 #define INT_EP_TXE			(21)
155 #define INT_EP_USB_STATUS		(20)
156 #define INT_EP_TX_DIS			(19)
157 #define INT_EP_RX_DIS			(18)
158 #define INT_EP_PHY			(17)
159 #define INT_EP_DP			(16)
160 #define INT_EP_MAC_ERR			(15)
161 #define INT_EP_TDFU			(14)
162 #define INT_EP_TDFO			(13)
163 #define INT_EP_UTX			(12)
164 #define INT_EP_GPIO_11			(11)
165 #define INT_EP_GPIO_10			(10)
166 #define INT_EP_GPIO_9			(9)
167 #define INT_EP_GPIO_8			(8)
168 #define INT_EP_GPIO_7			(7)
169 #define INT_EP_GPIO_6			(6)
170 #define INT_EP_GPIO_5			(5)
171 #define INT_EP_GPIO_4			(4)
172 #define INT_EP_GPIO_3			(3)
173 #define INT_EP_GPIO_2			(2)
174 #define INT_EP_GPIO_1			(1)
175 #define INT_EP_GPIO_0			(0)
176 
177 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
178 	"RX FCS Errors",
179 	"RX Alignment Errors",
180 	"Rx Fragment Errors",
181 	"RX Jabber Errors",
182 	"RX Undersize Frame Errors",
183 	"RX Oversize Frame Errors",
184 	"RX Dropped Frames",
185 	"RX Unicast Byte Count",
186 	"RX Broadcast Byte Count",
187 	"RX Multicast Byte Count",
188 	"RX Unicast Frames",
189 	"RX Broadcast Frames",
190 	"RX Multicast Frames",
191 	"RX Pause Frames",
192 	"RX 64 Byte Frames",
193 	"RX 65 - 127 Byte Frames",
194 	"RX 128 - 255 Byte Frames",
195 	"RX 256 - 511 Bytes Frames",
196 	"RX 512 - 1023 Byte Frames",
197 	"RX 1024 - 1518 Byte Frames",
198 	"RX Greater 1518 Byte Frames",
199 	"EEE RX LPI Transitions",
200 	"EEE RX LPI Time",
201 	"TX FCS Errors",
202 	"TX Excess Deferral Errors",
203 	"TX Carrier Errors",
204 	"TX Bad Byte Count",
205 	"TX Single Collisions",
206 	"TX Multiple Collisions",
207 	"TX Excessive Collision",
208 	"TX Late Collisions",
209 	"TX Unicast Byte Count",
210 	"TX Broadcast Byte Count",
211 	"TX Multicast Byte Count",
212 	"TX Unicast Frames",
213 	"TX Broadcast Frames",
214 	"TX Multicast Frames",
215 	"TX Pause Frames",
216 	"TX 64 Byte Frames",
217 	"TX 65 - 127 Byte Frames",
218 	"TX 128 - 255 Byte Frames",
219 	"TX 256 - 511 Bytes Frames",
220 	"TX 512 - 1023 Byte Frames",
221 	"TX 1024 - 1518 Byte Frames",
222 	"TX Greater 1518 Byte Frames",
223 	"EEE TX LPI Transitions",
224 	"EEE TX LPI Time",
225 };
226 
227 struct lan78xx_statstage {
228 	u32 rx_fcs_errors;
229 	u32 rx_alignment_errors;
230 	u32 rx_fragment_errors;
231 	u32 rx_jabber_errors;
232 	u32 rx_undersize_frame_errors;
233 	u32 rx_oversize_frame_errors;
234 	u32 rx_dropped_frames;
235 	u32 rx_unicast_byte_count;
236 	u32 rx_broadcast_byte_count;
237 	u32 rx_multicast_byte_count;
238 	u32 rx_unicast_frames;
239 	u32 rx_broadcast_frames;
240 	u32 rx_multicast_frames;
241 	u32 rx_pause_frames;
242 	u32 rx_64_byte_frames;
243 	u32 rx_65_127_byte_frames;
244 	u32 rx_128_255_byte_frames;
245 	u32 rx_256_511_bytes_frames;
246 	u32 rx_512_1023_byte_frames;
247 	u32 rx_1024_1518_byte_frames;
248 	u32 rx_greater_1518_byte_frames;
249 	u32 eee_rx_lpi_transitions;
250 	u32 eee_rx_lpi_time;
251 	u32 tx_fcs_errors;
252 	u32 tx_excess_deferral_errors;
253 	u32 tx_carrier_errors;
254 	u32 tx_bad_byte_count;
255 	u32 tx_single_collisions;
256 	u32 tx_multiple_collisions;
257 	u32 tx_excessive_collision;
258 	u32 tx_late_collisions;
259 	u32 tx_unicast_byte_count;
260 	u32 tx_broadcast_byte_count;
261 	u32 tx_multicast_byte_count;
262 	u32 tx_unicast_frames;
263 	u32 tx_broadcast_frames;
264 	u32 tx_multicast_frames;
265 	u32 tx_pause_frames;
266 	u32 tx_64_byte_frames;
267 	u32 tx_65_127_byte_frames;
268 	u32 tx_128_255_byte_frames;
269 	u32 tx_256_511_bytes_frames;
270 	u32 tx_512_1023_byte_frames;
271 	u32 tx_1024_1518_byte_frames;
272 	u32 tx_greater_1518_byte_frames;
273 	u32 eee_tx_lpi_transitions;
274 	u32 eee_tx_lpi_time;
275 };
276 
277 struct lan78xx_statstage64 {
278 	u64 rx_fcs_errors;
279 	u64 rx_alignment_errors;
280 	u64 rx_fragment_errors;
281 	u64 rx_jabber_errors;
282 	u64 rx_undersize_frame_errors;
283 	u64 rx_oversize_frame_errors;
284 	u64 rx_dropped_frames;
285 	u64 rx_unicast_byte_count;
286 	u64 rx_broadcast_byte_count;
287 	u64 rx_multicast_byte_count;
288 	u64 rx_unicast_frames;
289 	u64 rx_broadcast_frames;
290 	u64 rx_multicast_frames;
291 	u64 rx_pause_frames;
292 	u64 rx_64_byte_frames;
293 	u64 rx_65_127_byte_frames;
294 	u64 rx_128_255_byte_frames;
295 	u64 rx_256_511_bytes_frames;
296 	u64 rx_512_1023_byte_frames;
297 	u64 rx_1024_1518_byte_frames;
298 	u64 rx_greater_1518_byte_frames;
299 	u64 eee_rx_lpi_transitions;
300 	u64 eee_rx_lpi_time;
301 	u64 tx_fcs_errors;
302 	u64 tx_excess_deferral_errors;
303 	u64 tx_carrier_errors;
304 	u64 tx_bad_byte_count;
305 	u64 tx_single_collisions;
306 	u64 tx_multiple_collisions;
307 	u64 tx_excessive_collision;
308 	u64 tx_late_collisions;
309 	u64 tx_unicast_byte_count;
310 	u64 tx_broadcast_byte_count;
311 	u64 tx_multicast_byte_count;
312 	u64 tx_unicast_frames;
313 	u64 tx_broadcast_frames;
314 	u64 tx_multicast_frames;
315 	u64 tx_pause_frames;
316 	u64 tx_64_byte_frames;
317 	u64 tx_65_127_byte_frames;
318 	u64 tx_128_255_byte_frames;
319 	u64 tx_256_511_bytes_frames;
320 	u64 tx_512_1023_byte_frames;
321 	u64 tx_1024_1518_byte_frames;
322 	u64 tx_greater_1518_byte_frames;
323 	u64 eee_tx_lpi_transitions;
324 	u64 eee_tx_lpi_time;
325 };
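/* Field order in both statstage structures mirrors lan78xx_gstrings[]
 * above: ethtool stats are produced by memcpy()ing curr_stat, so the
 * struct layout is the reporting format.
 */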
326 
327 static u32 lan78xx_regs[] = {
328 	ID_REV,
329 	INT_STS,
330 	HW_CFG,
331 	PMT_CTL,
332 	E2P_CMD,
333 	E2P_DATA,
334 	USB_STATUS,
335 	VLAN_TYPE,
336 	MAC_CR,
337 	MAC_RX,
338 	MAC_TX,
339 	FLOW,
340 	ERR_STS,
341 	MII_ACC,
342 	MII_DATA,
343 	EEE_TX_LPI_REQ_DLY,
344 	EEE_TW_TX_SYS,
345 	EEE_TX_LPI_REM_DLY,
346 	WUCSR
347 };
348 
349 #define PHY_REG_SIZE (32 * sizeof(u32))
350 
351 struct lan78xx_net;
352 
353 struct lan78xx_priv {
354 	struct lan78xx_net *dev;
355 	u32 rfe_ctl;
356 	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
357 	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
358 	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
359 	struct mutex dataport_mutex; /* for dataport access */
360 	spinlock_t rfe_ctl_lock; /* for rfe register access */
361 	struct work_struct set_multicast;
362 	struct work_struct set_vlan;
363 	u32 wol;
364 };
365 
366 enum skb_state {
367 	illegal = 0,
368 	tx_start,
369 	tx_done,
370 	rx_start,
371 	rx_done,
372 	rx_cleanup,
373 	unlink_start
374 };
375 
376 struct skb_data {		/* skb->cb is one of these */
377 	struct urb *urb;
378 	struct lan78xx_net *dev;
379 	enum skb_state state;
380 	size_t length;
381 	int num_of_packet;
382 };
383 
384 #define EVENT_TX_HALT			0
385 #define EVENT_RX_HALT			1
386 #define EVENT_RX_MEMORY			2
387 #define EVENT_STS_SPLIT			3
388 #define EVENT_PHY_INT_ACK		4
389 #define EVENT_RX_PAUSED			5
390 #define EVENT_DEV_WAKING		6
391 #define EVENT_DEV_ASLEEP		7
392 #define EVENT_DEV_OPEN			8
393 #define EVENT_STAT_UPDATE		9
394 #define EVENT_DEV_DISCONNECT		10
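/* The EVENT_* bits are set in lan78xx_net::flags and serviced from the
 * deferred work queue; see lan78xx_defer_kevent().
 */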
395 
396 struct statstage {
397 	struct mutex			access_lock;	/* for stats access */
398 	struct lan78xx_statstage	saved;
399 	struct lan78xx_statstage	rollover_count;
400 	struct lan78xx_statstage	rollover_max;
401 	struct lan78xx_statstage64	curr_stat;
402 };
403 
404 struct irq_domain_data {
405 	struct irq_domain	*irqdomain;
406 	unsigned int		phyirq;
407 	struct irq_chip		*irqchip;
408 	irq_flow_handler_t	irq_handler;
409 	u32			irqenable;
410 	struct mutex		irq_lock;		/* for irq bus access */
411 };
412 
413 struct lan78xx_net {
414 	struct net_device	*net;
415 	struct usb_device	*udev;
416 	struct usb_interface	*intf;
417 
418 	unsigned int		tx_pend_data_len;
419 	size_t			n_tx_urbs;
420 	size_t			n_rx_urbs;
421 	size_t			tx_urb_size;
422 	size_t			rx_urb_size;
423 
424 	struct sk_buff_head	rxq_free;
425 	struct sk_buff_head	rxq;
426 	struct sk_buff_head	rxq_done;
427 	struct sk_buff_head	rxq_overflow;
428 	struct sk_buff_head	txq_free;
429 	struct sk_buff_head	txq;
430 	struct sk_buff_head	txq_pend;
431 
432 	struct napi_struct	napi;
433 
434 	struct delayed_work	wq;
435 
436 	int			msg_enable;
437 
438 	struct urb		*urb_intr;
439 	struct usb_anchor	deferred;
440 
441 	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
442 	struct mutex		mdiobus_mutex; /* for MDIO bus access */
443 	unsigned int		pipe_in, pipe_out, pipe_intr;
444 
445 	unsigned int		bulk_in_delay;
446 	unsigned int		burst_cap;
447 
448 	unsigned long		flags;
449 
450 	wait_queue_head_t	*wait;
451 
452 	unsigned int		maxpacket;
453 	struct timer_list	stat_monitor;
454 
455 	unsigned long		data[5];
456 
457 	u32			chipid;
458 	u32			chiprev;
459 	struct mii_bus		*mdiobus;
460 	phy_interface_t		interface;
461 
462 	int			delta;
463 	struct statstage	stats;
464 
465 	struct irq_domain_data	domain_data;
466 
467 	struct phylink		*phylink;
468 	struct phylink_config	phylink_config;
469 };
470 
471 /* use ethtool to change the level for any given device */
472 static int msg_level = -1;
473 module_param(msg_level, int, 0);
474 MODULE_PARM_DESC(msg_level, "Override default message level");
475 
476 static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
477 {
478 	if (skb_queue_empty(buf_pool))
479 		return NULL;
480 
481 	return skb_dequeue(buf_pool);
482 }
483 
484 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
485 				struct sk_buff *buf)
486 {
487 	buf->data = buf->head;
488 	skb_reset_tail_pointer(buf);
489 
490 	buf->len = 0;
491 	buf->data_len = 0;
492 
493 	skb_queue_tail(buf_pool, buf);
494 }
495 
496 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
497 {
498 	struct skb_data *entry;
499 	struct sk_buff *buf;
500 
501 	while (!skb_queue_empty(buf_pool)) {
502 		buf = skb_dequeue(buf_pool);
503 		if (buf) {
504 			entry = (struct skb_data *)buf->cb;
505 			usb_free_urb(entry->urb);
506 			dev_kfree_skb_any(buf);
507 		}
508 	}
509 }
510 
511 static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
512 				  size_t n_urbs, size_t urb_size,
513 				  struct lan78xx_net *dev)
514 {
515 	struct skb_data *entry;
516 	struct sk_buff *buf;
517 	struct urb *urb;
518 	int i;
519 
520 	skb_queue_head_init(buf_pool);
521 
522 	for (i = 0; i < n_urbs; i++) {
523 		buf = alloc_skb(urb_size, GFP_ATOMIC);
524 		if (!buf)
525 			goto error;
526 
527 		if (skb_linearize(buf) != 0) {
528 			dev_kfree_skb_any(buf);
529 			goto error;
530 		}
531 
532 		urb = usb_alloc_urb(0, GFP_ATOMIC);
533 		if (!urb) {
534 			dev_kfree_skb_any(buf);
535 			goto error;
536 		}
537 
538 		entry = (struct skb_data *)buf->cb;
539 		entry->urb = urb;
540 		entry->dev = dev;
541 		entry->length = 0;
542 		entry->num_of_packet = 0;
543 
544 		skb_queue_tail(buf_pool, buf);
545 	}
546 
547 	return 0;
548 
549 error:
550 	lan78xx_free_buf_pool(buf_pool);
551 
552 	return -ENOMEM;
553 }
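/* The rx/tx buffer pools pre-allocate fixed sets of skb+URB pairs so
 * the data paths only dequeue and re-queue them; lan78xx_release_buf()
 * resets an skb's pointers and lengths before returning it to a pool.
 */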
554 
555 static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
556 {
557 	return lan78xx_get_buf(&dev->rxq_free);
558 }
559 
560 static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
561 				   struct sk_buff *rx_buf)
562 {
563 	lan78xx_release_buf(&dev->rxq_free, rx_buf);
564 }
565 
566 static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
567 {
568 	lan78xx_free_buf_pool(&dev->rxq_free);
569 }
570 
571 static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
572 {
573 	return lan78xx_alloc_buf_pool(&dev->rxq_free,
574 				      dev->n_rx_urbs, dev->rx_urb_size, dev);
575 }
576 
577 static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
578 {
579 	return lan78xx_get_buf(&dev->txq_free);
580 }
581 
582 static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
583 				   struct sk_buff *tx_buf)
584 {
585 	lan78xx_release_buf(&dev->txq_free, tx_buf);
586 }
587 
588 static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
589 {
590 	lan78xx_free_buf_pool(&dev->txq_free);
591 }
592 
593 static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
594 {
595 	return lan78xx_alloc_buf_pool(&dev->txq_free,
596 				      dev->n_tx_urbs, dev->tx_urb_size, dev);
597 }
598 
599 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
600 {
601 	u32 *buf;
602 	int ret;
603 
604 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
605 		return -ENODEV;
606 
607 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
608 	if (!buf)
609 		return -ENOMEM;
610 
611 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
612 			      USB_VENDOR_REQUEST_READ_REGISTER,
613 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
614 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
615 	if (likely(ret >= 0)) {
616 		le32_to_cpus(buf);
617 		*data = *buf;
618 	} else if (net_ratelimit()) {
619 		netdev_warn(dev->net,
620 			    "Failed to read register index 0x%08x. ret = %pe",
621 			    index, ERR_PTR(ret));
622 	}
623 
624 	kfree(buf);
625 
626 	return ret < 0 ? ret : 0;
627 }
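/* Note: usb_control_msg() requires a DMA-capable transfer buffer, so
 * both register accessors bounce the 32-bit value through a small
 * kmalloc'd buffer instead of using stack memory.
 */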
628 
629 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
630 {
631 	u32 *buf;
632 	int ret;
633 
634 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
635 		return -ENODEV;
636 
637 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
638 	if (!buf)
639 		return -ENOMEM;
640 
641 	*buf = data;
642 	cpu_to_le32s(buf);
643 
644 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
645 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
646 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
647 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
648 	if (unlikely(ret < 0) &&
649 	    net_ratelimit()) {
650 		netdev_warn(dev->net,
651 			    "Failed to write register index 0x%08x. ret = %pe",
652 			    index, ERR_PTR(ret));
653 	}
654 
655 	kfree(buf);
656 
657 	return ret < 0 ? ret : 0;
658 }
659 
660 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
661 			      u32 data)
662 {
663 	int ret;
664 	u32 buf;
665 
666 	ret = lan78xx_read_reg(dev, reg, &buf);
667 	if (ret < 0)
668 		return ret;
669 
670 	buf &= ~mask;
671 	buf |= (mask & data);
672 
673 	return lan78xx_write_reg(dev, reg, buf);
674 }
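/* lan78xx_update_reg() is a read-modify-write helper; e.g.
 * lan78xx_update_reg(dev, MAC_CR, MAC_CR_RST_, MAC_CR_RST_) sets the
 * reset bit while preserving the remaining MAC_CR bits.
 */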
675 
676 static int lan78xx_read_stats(struct lan78xx_net *dev,
677 			      struct lan78xx_statstage *data)
678 {
679 	int ret = 0;
680 	int i;
681 	struct lan78xx_statstage *stats;
682 	u32 *src;
683 	u32 *dst;
684 
685 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
686 	if (!stats)
687 		return -ENOMEM;
688 
689 	ret = usb_control_msg(dev->udev,
690 			      usb_rcvctrlpipe(dev->udev, 0),
691 			      USB_VENDOR_REQUEST_GET_STATS,
692 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
693 			      0,
694 			      0,
695 			      (void *)stats,
696 			      sizeof(*stats),
697 			      USB_CTRL_GET_TIMEOUT);
698 	if (likely(ret >= 0)) {
699 		src = (u32 *)stats;
700 		dst = (u32 *)data;
701 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
702 			le32_to_cpus(&src[i]);
703 			dst[i] = src[i];
704 		}
705 	} else {
706 		netdev_warn(dev->net,
707 			    "Failed to read stats, ret = %d", ret);
708 	}
709 
710 	kfree(stats);
711 
712 	return ret;
713 }
714 
715 #define check_counter_rollover(struct1, dev_stats, member)		\
716 	do {								\
717 		if ((struct1)->member < (dev_stats).saved.member)	\
718 			(dev_stats).rollover_count.member++;		\
719 	} while (0)
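/* The hardware counters are 32 bits wide and wrap; by comparing each
 * snapshot with the previous one a per-counter rollover count is kept,
 * letting lan78xx_update_stats() reconstruct 64-bit totals as
 * raw + rollovers * (rollover_max + 1).
 */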
720 
721 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
722 					struct lan78xx_statstage *stats)
723 {
724 	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
725 	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
726 	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
727 	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
728 	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
729 	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
730 	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
731 	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
732 	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
733 	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
734 	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
735 	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
736 	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
737 	check_counter_rollover(stats, dev->stats, rx_pause_frames);
738 	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
739 	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
740 	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
741 	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
742 	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
743 	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
744 	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
745 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
746 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
747 	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
748 	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
749 	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
750 	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
751 	check_counter_rollover(stats, dev->stats, tx_single_collisions);
752 	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
753 	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
754 	check_counter_rollover(stats, dev->stats, tx_late_collisions);
755 	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
756 	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
757 	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
758 	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
759 	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
760 	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
761 	check_counter_rollover(stats, dev->stats, tx_pause_frames);
762 	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
763 	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
764 	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
765 	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
766 	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
767 	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
768 	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
769 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
770 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
771 
772 	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
773 }
774 
775 static void lan78xx_update_stats(struct lan78xx_net *dev)
776 {
777 	u32 *p, *count, *max;
778 	u64 *data;
779 	int i;
780 	struct lan78xx_statstage lan78xx_stats;
781 
782 	if (usb_autopm_get_interface(dev->intf) < 0)
783 		return;
784 
785 	p = (u32 *)&lan78xx_stats;
786 	count = (u32 *)&dev->stats.rollover_count;
787 	max = (u32 *)&dev->stats.rollover_max;
788 	data = (u64 *)&dev->stats.curr_stat;
789 
790 	mutex_lock(&dev->stats.access_lock);
791 
792 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
793 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
794 
795 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
796 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
797 
798 	mutex_unlock(&dev->stats.access_lock);
799 
800 	usb_autopm_put_interface(dev->intf);
801 }
802 
803 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
804 {
805 	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
806 }
807 
808 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
809 			   u32 hw_disabled)
810 {
811 	unsigned long timeout;
812 	bool stopped = true;
813 	int ret;
814 	u32 buf;
815 
816 	/* Stop the h/w block (if not already stopped) */
817 
818 	ret = lan78xx_read_reg(dev, reg, &buf);
819 	if (ret < 0)
820 		return ret;
821 
822 	if (buf & hw_enabled) {
823 		buf &= ~hw_enabled;
824 
825 		ret = lan78xx_write_reg(dev, reg, buf);
826 		if (ret < 0)
827 			return ret;
828 
829 		stopped = false;
830 		timeout = jiffies + HW_DISABLE_TIMEOUT;
831 		do  {
832 			ret = lan78xx_read_reg(dev, reg, &buf);
833 			if (ret < 0)
834 				return ret;
835 
836 			if (buf & hw_disabled)
837 				stopped = true;
838 			else
839 				msleep(HW_DISABLE_DELAY_MS);
840 		} while (!stopped && !time_after(jiffies, timeout));
841 	}
842 
843 	return stopped ? 0 : -ETIMEDOUT;
844 }
845 
846 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
847 {
848 	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
849 }
850 
851 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
852 {
853 	int ret;
854 
855 	netif_dbg(dev, drv, dev->net, "start tx path");
856 
857 	/* Start the MAC transmitter */
858 
859 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
860 	if (ret < 0)
861 		return ret;
862 
863 	/* Start the Tx FIFO */
864 
865 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
866 	if (ret < 0)
867 		return ret;
868 
869 	return 0;
870 }
871 
872 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
873 {
874 	int ret;
875 
876 	netif_dbg(dev, drv, dev->net, "stop tx path");
877 
878 	/* Stop the Tx FIFO */
879 
880 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
881 	if (ret < 0)
882 		return ret;
883 
884 	/* Stop the MAC transmitter */
885 
886 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
887 	if (ret < 0)
888 		return ret;
889 
890 	return 0;
891 }
892 
893 /* The caller must ensure the Tx path is stopped before calling
894  * lan78xx_flush_tx_fifo().
895  */
896 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
897 {
898 	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
899 }
900 
901 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
902 {
903 	int ret;
904 
905 	netif_dbg(dev, drv, dev->net, "start rx path");
906 
907 	/* Start the Rx FIFO */
908 
909 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
910 	if (ret < 0)
911 		return ret;
912 
913 	/* Start the MAC receiver */
914 
915 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
916 	if (ret < 0)
917 		return ret;
918 
919 	return 0;
920 }
921 
922 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
923 {
924 	int ret;
925 
926 	netif_dbg(dev, drv, dev->net, "stop rx path");
927 
928 	/* Stop the MAC receiver */
929 
930 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
931 	if (ret < 0)
932 		return ret;
933 
934 	/* Stop the Rx FIFO */
935 
936 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
937 	if (ret < 0)
938 		return ret;
939 
940 	return 0;
941 }
942 
943 /* The caller must ensure the Rx path is stopped before calling
944  * lan78xx_flush_rx_fifo().
945  */
946 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
947 {
948 	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
949 }
950 
951 /* Poll until MII access completes or times out; mdiobus_mutex must be held */
952 static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
953 {
954 	unsigned long start_time = jiffies;
955 	u32 val;
956 	int ret;
957 
958 	do {
959 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
960 		if (ret < 0)
961 			return ret;
962 
963 		if (!(val & MII_ACC_MII_BUSY_))
964 			return 0;
965 	} while (!time_after(jiffies, start_time + HZ));
966 
967 	return -ETIMEDOUT;
968 }
969 
970 static inline u32 mii_access(int id, int index, int read)
971 {
972 	u32 ret;
973 
974 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
975 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
976 	if (read)
977 		ret |= MII_ACC_MII_READ_;
978 	else
979 		ret |= MII_ACC_MII_WRITE_;
980 	ret |= MII_ACC_MII_BUSY_;
981 
982 	return ret;
983 }
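/* Example: mii_access(phy_id, MII_BMSR, MII_READ) yields a MII_ACC
 * value with the PHY address, register index 1 (BMSR), the read
 * direction flag and the busy bit set; writing it to MII_ACC starts
 * the transaction.
 */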
984 
985 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
986 {
987 	unsigned long start_time = jiffies;
988 	u32 val;
989 	int ret;
990 
991 	do {
992 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
993 		if (ret < 0)
994 			return ret;
995 
996 		if (!(val & E2P_CMD_EPC_BUSY_) ||
997 		    (val & E2P_CMD_EPC_TIMEOUT_))
998 			break;
999 		usleep_range(40, 100);
1000 	} while (!time_after(jiffies, start_time + HZ));
1001 
1002 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
1003 		netdev_warn(dev->net, "EEPROM operation timed out");
1004 		return -ETIMEDOUT;
1005 	}
1006 
1007 	return 0;
1008 }
1009 
1010 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
1011 {
1012 	unsigned long start_time = jiffies;
1013 	u32 val;
1014 	int ret;
1015 
1016 	do {
1017 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
1018 		if (ret < 0)
1019 			return ret;
1020 
1021 		if (!(val & E2P_CMD_EPC_BUSY_))
1022 			return 0;
1023 
1024 		usleep_range(40, 100);
1025 	} while (!time_after(jiffies, start_time + HZ));
1026 
1027 	netdev_warn(dev->net, "EEPROM is busy");
1028 	return -ETIMEDOUT;
1029 }
1030 
1031 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
1032 				   u32 length, u8 *data)
1033 {
1034 	u32 val, saved;
1035 	int i, ret;
1036 
1037 	/* Depending on the chip, some EEPROM pins are muxed with the LED
1038 	 * function; disable and restore the LED function around EEPROM access.
1039 	 */
1040 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
1041 	if (ret < 0)
1042 		return ret;
1043 
1044 	saved = val;
1045 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
1046 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
1047 		ret = lan78xx_write_reg(dev, HW_CFG, val);
1048 		if (ret < 0)
1049 			return ret;
1050 	}
1051 
1052 	ret = lan78xx_eeprom_confirm_not_busy(dev);
1053 	if (ret == -ETIMEDOUT)
1054 		goto read_raw_eeprom_done;
1055 	/* If USB fails, there is nothing to do */
1056 	if (ret < 0)
1057 		return ret;
1058 
1059 	for (i = 0; i < length; i++) {
1060 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
1061 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1062 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
1063 		if (ret < 0)
1064 			return ret;
1065 
1066 		ret = lan78xx_wait_eeprom(dev);
1067 		/* Not a USB-specific error; try to recover */
1068 		if (ret == -ETIMEDOUT)
1069 			goto read_raw_eeprom_done;
1070 		/* If USB fails, there is nothing to do */
1071 		if (ret < 0)
1072 			return ret;
1073 
1074 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
1075 		if (ret < 0)
1076 			return ret;
1077 
1078 		data[i] = val & 0xFF;
1079 		offset++;
1080 	}
1081 
1082 read_raw_eeprom_done:
1083 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
1084 		return lan78xx_write_reg(dev, HW_CFG, saved);
1085 
1086 	return 0;
1087 }
1088 
1089 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
1090 			       u32 length, u8 *data)
1091 {
1092 	int ret;
1093 	u8 sig;
1094 
1095 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
1096 	if (ret < 0)
1097 		return ret;
1098 
1099 	if (sig != EEPROM_INDICATOR)
1100 		return -ENODATA;
1101 
1102 	return lan78xx_read_raw_eeprom(dev, offset, length, data);
1103 }
1104 
1105 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
1106 				    u32 length, u8 *data)
1107 {
1108 	u32 val;
1109 	u32 saved;
1110 	int i, ret;
1111 
1112 	/* Depending on the chip, some EEPROM pins are muxed with the LED
1113 	 * function; disable and restore the LED function around EEPROM access.
1114 	 */
1115 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
1116 	if (ret < 0)
1117 		return ret;
1118 
1119 	saved = val;
1120 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
1121 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
1122 		ret = lan78xx_write_reg(dev, HW_CFG, val);
1123 		if (ret < 0)
1124 			return ret;
1125 	}
1126 
1127 	ret = lan78xx_eeprom_confirm_not_busy(dev);
1128 	/* Not a USB-specific error; try to recover */
1129 	if (ret == -ETIMEDOUT)
1130 		goto write_raw_eeprom_done;
1131 	/* If USB fails, there is nothing to do */
1132 	if (ret < 0)
1133 		return ret;
1134 
1135 	/* Issue write/erase enable command */
1136 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
1137 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
1138 	if (ret < 0)
1139 		return ret;
1140 
1141 	ret = lan78xx_wait_eeprom(dev);
1142 	/* Looks like not USB specific error, try to recover */
1143 	/* Not a USB-specific error; try to recover */
1144 		goto write_raw_eeprom_done;
1145 	/* If USB fails, there is nothing to do */
1146 	if (ret < 0)
1147 		return ret;
1148 
1149 	for (i = 0; i < length; i++) {
1150 		/* Fill data register */
1151 		val = data[i];
1152 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
1153 		if (ret < 0)
1154 			return ret;
1155 
1156 		/* Send "write" command */
1157 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
1158 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1159 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
1160 		if (ret < 0)
1161 			return ret;
1162 
1163 		ret = lan78xx_wait_eeprom(dev);
1164 		/* Looks like not USB specific error, try to recover */
1165 		/* Not a USB-specific error; try to recover */
1166 			goto write_raw_eeprom_done;
1167 		/* If USB fails, there is nothing to do */
1168 		if (ret < 0)
1169 			return ret;
1170 
1171 		offset++;
1172 	}
1173 
1174 write_raw_eeprom_done:
1175 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
1176 		return lan78xx_write_reg(dev, HW_CFG, saved);
1177 
1178 	return 0;
1179 }
1180 
1181 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1182 				u32 length, u8 *data)
1183 {
1184 	unsigned long timeout;
1185 	int ret, i;
1186 	u32 buf;
1187 
1188 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1189 	if (ret < 0)
1190 		return ret;
1191 
1192 	if (buf & OTP_PWR_DN_PWRDN_N_) {
1193 		/* clear it and wait for it to clear */
1194 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1195 		if (ret < 0)
1196 			return ret;
1197 
1198 		timeout = jiffies + HZ;
1199 		do {
1200 			usleep_range(1, 10);
1201 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1202 			if (ret < 0)
1203 				return ret;
1204 
1205 			if (time_after(jiffies, timeout)) {
1206 				netdev_warn(dev->net,
1207 					    "timeout on OTP_PWR_DN");
1208 				return -ETIMEDOUT;
1209 			}
1210 		} while (buf & OTP_PWR_DN_PWRDN_N_);
1211 	}
1212 
1213 	for (i = 0; i < length; i++) {
1214 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
1215 					((offset + i) >> 8) & OTP_ADDR1_15_11);
1216 		if (ret < 0)
1217 			return ret;
1218 
1219 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
1220 					((offset + i) & OTP_ADDR2_10_3));
1221 		if (ret < 0)
1222 			return ret;
1223 
1224 		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
1225 		if (ret < 0)
1226 			return ret;
1227 
1228 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1229 		if (ret < 0)
1230 			return ret;
1231 
1232 		timeout = jiffies + HZ;
1233 		do {
1234 			udelay(1);
1235 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
1236 			if (ret < 0)
1237 				return ret;
1238 
1239 			if (time_after(jiffies, timeout)) {
1240 				netdev_warn(dev->net,
1241 					    "timeout on OTP_STATUS");
1242 				return -ETIMEDOUT;
1243 			}
1244 		} while (buf & OTP_STATUS_BUSY_);
1245 
1246 		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
1247 		if (ret < 0)
1248 			return ret;
1249 
1250 		data[i] = (u8)(buf & 0xFF);
1251 	}
1252 
1253 	return 0;
1254 }
1255 
1256 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
1257 				 u32 length, u8 *data)
1258 {
1259 	int i;
1260 	u32 buf;
1261 	unsigned long timeout;
1262 	int ret;
1263 
1264 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1265 	if (ret < 0)
1266 		return ret;
1267 
1268 	if (buf & OTP_PWR_DN_PWRDN_N_) {
1269 		/* clear it and wait for it to clear */
1270 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1271 		if (ret < 0)
1272 			return ret;
1273 
1274 		timeout = jiffies + HZ;
1275 		do {
1276 			udelay(1);
1277 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1278 			if (ret < 0)
1279 				return ret;
1280 
1281 			if (time_after(jiffies, timeout)) {
1282 				netdev_warn(dev->net,
1283 					    "timeout on OTP_PWR_DN completion");
1284 				return -ETIMEDOUT;
1285 			}
1286 		} while (buf & OTP_PWR_DN_PWRDN_N_);
1287 	}
1288 
1289 	/* set to BYTE program mode */
1290 	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
1291 	if (ret < 0)
1292 		return ret;
1293 
1294 	for (i = 0; i < length; i++) {
1295 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
1296 					((offset + i) >> 8) & OTP_ADDR1_15_11);
1297 		if (ret < 0)
1298 			return ret;
1299 
1300 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
1301 					((offset + i) & OTP_ADDR2_10_3));
1302 		if (ret < 0)
1303 			return ret;
1304 
1305 		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
1306 		if (ret < 0)
1307 			return ret;
1308 
1309 		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
1310 		if (ret < 0)
1311 			return ret;
1312 
1313 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1314 		if (ret < 0)
1315 			return ret;
1316 
1317 		timeout = jiffies + HZ;
1318 		do {
1319 			udelay(1);
1320 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
1321 			if (ret < 0)
1322 				return ret;
1323 
1324 			if (time_after(jiffies, timeout)) {
1325 				netdev_warn(dev->net,
1326 					    "Timeout on OTP_STATUS completion");
1327 				return -ETIMEDOUT;
1328 			}
1329 		} while (buf & OTP_STATUS_BUSY_);
1330 	}
1331 
1332 	return 0;
1333 }
1334 
1335 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1336 			    u32 length, u8 *data)
1337 {
1338 	u8 sig;
1339 	int ret;
1340 
1341 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1342 
1343 	if (ret == 0) {
1344 		if (sig == OTP_INDICATOR_2)
1345 			offset += 0x100;
1346 		else if (sig != OTP_INDICATOR_1)
1347 			ret = -EINVAL;
1348 		if (!ret)
1349 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1350 	}
1351 
1352 	return ret;
1353 }
1354 
1355 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1356 {
1357 	int i, ret;
1358 
1359 	for (i = 0; i < 100; i++) {
1360 		u32 dp_sel;
1361 
1362 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1363 		if (unlikely(ret < 0))
1364 			return ret;
1365 
1366 		if (dp_sel & DP_SEL_DPRDY_)
1367 			return 0;
1368 
1369 		usleep_range(40, 100);
1370 	}
1371 
1372 	netdev_warn(dev->net, "%s timed out", __func__);
1373 
1374 	return -ETIMEDOUT;
1375 }
1376 
1377 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1378 				  u32 addr, u32 length, u32 *buf)
1379 {
1380 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1381 	int i, ret;
1382 
1383 	ret = usb_autopm_get_interface(dev->intf);
1384 	if (ret < 0)
1385 		return ret;
1386 
1387 	mutex_lock(&pdata->dataport_mutex);
1388 
1389 	ret = lan78xx_dataport_wait_not_busy(dev);
1390 	if (ret < 0)
1391 		goto dataport_write;
1392 
1393 	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
1394 	if (ret < 0)
1395 		goto dataport_write;
1396 
1397 	for (i = 0; i < length; i++) {
1398 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1399 		if (ret < 0)
1400 			goto dataport_write;
1401 
1402 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1403 		if (ret < 0)
1404 			goto dataport_write;
1405 
1406 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1407 		if (ret < 0)
1408 			goto dataport_write;
1409 
1410 		ret = lan78xx_dataport_wait_not_busy(dev);
1411 		if (ret < 0)
1412 			goto dataport_write;
1413 	}
1414 
1415 dataport_write:
1416 	if (ret < 0)
1417 		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));
1418 
1419 	mutex_unlock(&pdata->dataport_mutex);
1420 	usb_autopm_put_interface(dev->intf);
1421 
1422 	return ret;
1423 }
1424 
1425 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1426 				    int index, u8 addr[ETH_ALEN])
1427 {
1428 	u32 temp;
1429 
1430 	if (pdata && index > 0 && index < NUM_OF_MAF) {
1431 		temp = addr[3];
1432 		temp = addr[2] | (temp << 8);
1433 		temp = addr[1] | (temp << 8);
1434 		temp = addr[0] | (temp << 8);
1435 		pdata->pfilter_table[index][1] = temp;
1436 		temp = addr[5];
1437 		temp = addr[4] | (temp << 8);
1438 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1439 		pdata->pfilter_table[index][0] = temp;
1440 	}
1441 }
1442 
1443 /* returns hash bit number for given MAC address */
1444 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1445 {
1446 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1447 }
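/* The top nine bits of the frame CRC select one of 512 bits in the
 * vendor hash filter; lan78xx_set_multicast() maps that bit number to
 * a word and bit position in mchash_table[].
 */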
1448 
1449 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1450 {
1451 	struct lan78xx_priv *pdata =
1452 			container_of(param, struct lan78xx_priv, set_multicast);
1453 	struct lan78xx_net *dev = pdata->dev;
1454 	int i, ret;
1455 
1456 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1457 		  pdata->rfe_ctl);
1458 
1459 	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
1460 				     DP_SEL_VHF_VLAN_LEN,
1461 				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1462 	if (ret < 0)
1463 		goto multicast_write_done;
1464 
1465 	for (i = 1; i < NUM_OF_MAF; i++) {
1466 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1467 		if (ret < 0)
1468 			goto multicast_write_done;
1469 
1470 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1471 					pdata->pfilter_table[i][1]);
1472 		if (ret < 0)
1473 			goto multicast_write_done;
1474 
1475 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1476 					pdata->pfilter_table[i][0]);
1477 		if (ret < 0)
1478 			goto multicast_write_done;
1479 	}
1480 
1481 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1482 
1483 multicast_write_done:
1484 	if (ret < 0)
1485 		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
1486 	return;
1487 }
1488 
1489 static void lan78xx_set_multicast(struct net_device *netdev)
1490 {
1491 	struct lan78xx_net *dev = netdev_priv(netdev);
1492 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1493 	unsigned long flags;
1494 	int i;
1495 
1496 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1497 
1498 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1499 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1500 
1501 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1502 		pdata->mchash_table[i] = 0;
1503 
1504 	/* pfilter_table[0] holds our own HW address */
1505 	for (i = 1; i < NUM_OF_MAF; i++) {
1506 		pdata->pfilter_table[i][0] = 0;
1507 		pdata->pfilter_table[i][1] = 0;
1508 	}
1509 
1510 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1511 
1512 	if (dev->net->flags & IFF_PROMISC) {
1513 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1514 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1515 	} else {
1516 		if (dev->net->flags & IFF_ALLMULTI) {
1517 			netif_dbg(dev, drv, dev->net,
1518 				  "receive all multicast enabled");
1519 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1520 		}
1521 	}
1522 
1523 	if (netdev_mc_count(dev->net)) {
1524 		struct netdev_hw_addr *ha;
1525 		int i;
1526 
1527 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1528 
1529 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1530 
1531 		i = 1;
1532 		netdev_for_each_mc_addr(ha, netdev) {
1533 			/* set first 32 into Perfect Filter */
1534 			if (i < 33) {
1535 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1536 			} else {
1537 				u32 bitnum = lan78xx_hash(ha->addr);
1538 
1539 				pdata->mchash_table[bitnum / 32] |=
1540 							(1 << (bitnum % 32));
1541 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1542 			}
1543 			i++;
1544 		}
1545 	}
1546 
1547 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1548 
1549 	/* defer register writes to a sleepable context */
1550 	schedule_work(&pdata->set_multicast);
1551 }
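/* Everything under rfe_ctl_lock above only updates shadow tables:
 * ndo_set_rx_mode may be called in atomic context, while the USB
 * register writes sleep, so they run from the set_multicast work item
 * (lan78xx_deferred_multicast_write()).
 */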
1552 
1553 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1554 
1555 static int lan78xx_mac_reset(struct lan78xx_net *dev)
1556 {
1557 	unsigned long start_time = jiffies;
1558 	u32 val;
1559 	int ret;
1560 
1561 	mutex_lock(&dev->mdiobus_mutex);
1562 
1563 	/* Resetting the device while there is activity on the MDIO
1564 	 * bus can result in the MAC interface locking up and not
1565 	 * completing register access transactions.
1566 	 */
1567 	ret = lan78xx_mdiobus_wait_not_busy(dev);
1568 	if (ret < 0)
1569 		goto exit_unlock;
1570 
1571 	ret = lan78xx_read_reg(dev, MAC_CR, &val);
1572 	if (ret < 0)
1573 		goto exit_unlock;
1574 
1575 	val |= MAC_CR_RST_;
1576 	ret = lan78xx_write_reg(dev, MAC_CR, val);
1577 	if (ret < 0)
1578 		goto exit_unlock;
1579 
1580 	/* Wait for the reset to complete before allowing any further
1581 	 * MAC register accesses otherwise the MAC may lock up.
1582 	 */
1583 	do {
1584 		ret = lan78xx_read_reg(dev, MAC_CR, &val);
1585 		if (ret < 0)
1586 			goto exit_unlock;
1587 
1588 		if (!(val & MAC_CR_RST_)) {
1589 			ret = 0;
1590 			goto exit_unlock;
1591 		}
1592 	} while (!time_after(jiffies, start_time + HZ));
1593 
1594 	ret = -ETIMEDOUT;
1595 exit_unlock:
1596 	mutex_unlock(&dev->mdiobus_mutex);
1597 
1598 	return ret;
1599 }
1600 
1601 /**
1602  * lan78xx_phy_int_ack - Acknowledge PHY interrupt
1603  * @dev: pointer to the LAN78xx device structure
1604  *
1605  * This function acknowledges the PHY interrupt by setting the
1606  * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
1607  *
1608  * Return: 0 on success or a negative error code on failure.
1609  */
1610 static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
1611 {
1612 	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1613 }
1614 
1615 /* Some work can't be done in tasklets, so we use keventd.
1616  *
1617  * NOTE: annoying asymmetry: if it's already active, schedule_work()
1618  * fails, but tasklet_schedule() doesn't. Hope the failure is rare.
1619  */
1620 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1621 {
1622 	set_bit(work, &dev->flags);
1623 	if (!schedule_delayed_work(&dev->wq, 0))
1624 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1625 }
1626 
1627 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1628 {
1629 	u32 intdata;
1630 
1631 	if (urb->actual_length != 4) {
1632 		netdev_warn(dev->net,
1633 			    "unexpected urb length %d", urb->actual_length);
1634 		return;
1635 	}
1636 
1637 	intdata = get_unaligned_le32(urb->transfer_buffer);
1638 
1639 	if (intdata & INT_ENP_PHY_INT) {
1640 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1641 		lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK);
1642 
1643 		if (dev->domain_data.phyirq > 0)
1644 			generic_handle_irq_safe(dev->domain_data.phyirq);
1645 	} else {
1646 		netdev_warn(dev->net,
1647 			    "unexpected interrupt: 0x%08x\n", intdata);
1648 	}
1649 }
1650 
1651 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1652 {
1653 	return MAX_EEPROM_SIZE;
1654 }
1655 
1656 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1657 				      struct ethtool_eeprom *ee, u8 *data)
1658 {
1659 	struct lan78xx_net *dev = netdev_priv(netdev);
1660 	int ret;
1661 
1662 	ret = usb_autopm_get_interface(dev->intf);
1663 	if (ret)
1664 		return ret;
1665 
1666 	ee->magic = LAN78XX_EEPROM_MAGIC;
1667 
1668 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1669 
1670 	usb_autopm_put_interface(dev->intf);
1671 
1672 	return ret;
1673 }
1674 
1675 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1676 				      struct ethtool_eeprom *ee, u8 *data)
1677 {
1678 	struct lan78xx_net *dev = netdev_priv(netdev);
1679 	int ret;
1680 
1681 	ret = usb_autopm_get_interface(dev->intf);
1682 	if (ret)
1683 		return ret;
1684 
1685 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1686 	 * to load data from EEPROM
1687 	 */
1688 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1689 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1690 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1691 		 (ee->offset == 0) &&
1692 		 (ee->len == 512) &&
1693 		 (data[0] == OTP_INDICATOR_1))
1694 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1695 
1696 	usb_autopm_put_interface(dev->intf);
1697 
1698 	return ret;
1699 }
1700 
1701 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1702 				u8 *data)
1703 {
1704 	if (stringset == ETH_SS_STATS)
1705 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1706 }
1707 
1708 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1709 {
1710 	if (sset == ETH_SS_STATS)
1711 		return ARRAY_SIZE(lan78xx_gstrings);
1712 	else
1713 		return -EOPNOTSUPP;
1714 }
1715 
1716 static void lan78xx_get_stats(struct net_device *netdev,
1717 			      struct ethtool_stats *stats, u64 *data)
1718 {
1719 	struct lan78xx_net *dev = netdev_priv(netdev);
1720 
1721 	lan78xx_update_stats(dev);
1722 
1723 	mutex_lock(&dev->stats.access_lock);
1724 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1725 	mutex_unlock(&dev->stats.access_lock);
1726 }
1727 
1728 static void lan78xx_get_wol(struct net_device *netdev,
1729 			    struct ethtool_wolinfo *wol)
1730 {
1731 	struct lan78xx_net *dev = netdev_priv(netdev);
1732 	int ret;
1733 	u32 buf;
1734 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1735 
1736 	if (usb_autopm_get_interface(dev->intf) < 0)
1737 		return;
1738 
1739 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1740 	if (unlikely(ret < 0)) {
1741 		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
1742 		wol->supported = 0;
1743 		wol->wolopts = 0;
1744 	} else {
1745 		if (buf & USB_CFG_RMT_WKP_) {
1746 			wol->supported = WAKE_ALL;
1747 			wol->wolopts = pdata->wol;
1748 		} else {
1749 			wol->supported = 0;
1750 			wol->wolopts = 0;
1751 		}
1752 	}
1753 
1754 	usb_autopm_put_interface(dev->intf);
1755 }
1756 
1757 static int lan78xx_set_wol(struct net_device *netdev,
1758 			   struct ethtool_wolinfo *wol)
1759 {
1760 	struct lan78xx_net *dev = netdev_priv(netdev);
1761 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1762 	int ret;
1763 
1764 	if (wol->wolopts & ~WAKE_ALL)
1765 		return -EINVAL;
1766 
1767 	ret = usb_autopm_get_interface(dev->intf);
1768 	if (ret < 0)
1769 		return ret;
1770 
1771 	pdata->wol = wol->wolopts;
1772 
1773 	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1774 	if (ret < 0)
1775 		goto exit_pm_put;
1776 
1777 	ret = phy_ethtool_set_wol(netdev->phydev, wol);
1778 
1779 exit_pm_put:
1780 	usb_autopm_put_interface(dev->intf);
1781 
1782 	return ret;
1783 }
1784 
1785 static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
1786 {
1787 	struct lan78xx_net *dev = netdev_priv(net);
1788 
1789 	return phylink_ethtool_get_eee(dev->phylink, edata);
1790 }
1791 
1792 static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
1793 {
1794 	struct lan78xx_net *dev = netdev_priv(net);
1795 
1796 	return phylink_ethtool_set_eee(dev->phylink, edata);
1797 }
1798 
1799 static void lan78xx_get_drvinfo(struct net_device *net,
1800 				struct ethtool_drvinfo *info)
1801 {
1802 	struct lan78xx_net *dev = netdev_priv(net);
1803 
1804 	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1805 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1806 }
1807 
1808 static u32 lan78xx_get_msglevel(struct net_device *net)
1809 {
1810 	struct lan78xx_net *dev = netdev_priv(net);
1811 
1812 	return dev->msg_enable;
1813 }
1814 
1815 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1816 {
1817 	struct lan78xx_net *dev = netdev_priv(net);
1818 
1819 	dev->msg_enable = level;
1820 }
1821 
1822 static int lan78xx_get_link_ksettings(struct net_device *net,
1823 				      struct ethtool_link_ksettings *cmd)
1824 {
1825 	struct lan78xx_net *dev = netdev_priv(net);
1826 
1827 	return phylink_ethtool_ksettings_get(dev->phylink, cmd);
1828 }
1829 
1830 static int lan78xx_set_link_ksettings(struct net_device *net,
1831 				      const struct ethtool_link_ksettings *cmd)
1832 {
1833 	struct lan78xx_net *dev = netdev_priv(net);
1834 
1835 	return phylink_ethtool_ksettings_set(dev->phylink, cmd);
1836 }
1837 
1838 static void lan78xx_get_pause(struct net_device *net,
1839 			      struct ethtool_pauseparam *pause)
1840 {
1841 	struct lan78xx_net *dev = netdev_priv(net);
1842 
1843 	phylink_ethtool_get_pauseparam(dev->phylink, pause);
1844 }
1845 
1846 static int lan78xx_set_pause(struct net_device *net,
1847 			     struct ethtool_pauseparam *pause)
1848 {
1849 	struct lan78xx_net *dev = netdev_priv(net);
1850 
1851 	return phylink_ethtool_set_pauseparam(dev->phylink, pause);
1852 }
1853 
1854 static int lan78xx_get_regs_len(struct net_device *netdev)
1855 {
1856 	return sizeof(lan78xx_regs);
1857 }
1858 
1859 static void
1860 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1861 		 void *buf)
1862 {
1863 	struct lan78xx_net *dev = netdev_priv(netdev);
1864 	unsigned int data_count = 0;
1865 	u32 *data = buf;
1866 	int i, ret;
1867 
1868 	/* Read Device/MAC registers */
1869 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
1870 		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1871 		if (ret < 0) {
1872 			netdev_warn(dev->net,
1873 				    "failed to read register 0x%08x\n",
1874 				    lan78xx_regs[i]);
1875 			goto clean_data;
1876 		}
1877 
1878 		data_count++;
1879 	}
1880 
1881 	return;
1882 
1883 clean_data:
1884 	memset(data, 0, data_count * sizeof(u32));
1885 }
1886 
1887 static const struct ethtool_ops lan78xx_ethtool_ops = {
1888 	.get_link	= ethtool_op_get_link,
1889 	.nway_reset	= phy_ethtool_nway_reset,
1890 	.get_drvinfo	= lan78xx_get_drvinfo,
1891 	.get_msglevel	= lan78xx_get_msglevel,
1892 	.set_msglevel	= lan78xx_set_msglevel,
1893 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1894 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1895 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1896 	.get_ethtool_stats = lan78xx_get_stats,
1897 	.get_sset_count = lan78xx_get_sset_count,
1898 	.get_strings	= lan78xx_get_strings,
1899 	.get_wol	= lan78xx_get_wol,
1900 	.set_wol	= lan78xx_set_wol,
1901 	.get_ts_info	= ethtool_op_get_ts_info,
1902 	.get_eee	= lan78xx_get_eee,
1903 	.set_eee	= lan78xx_set_eee,
1904 	.get_pauseparam	= lan78xx_get_pause,
1905 	.set_pauseparam	= lan78xx_set_pause,
1906 	.get_link_ksettings = lan78xx_get_link_ksettings,
1907 	.set_link_ksettings = lan78xx_set_link_ksettings,
1908 	.get_regs_len	= lan78xx_get_regs_len,
1909 	.get_regs	= lan78xx_get_regs,
1910 };
1911 
1912 static int lan78xx_init_mac_address(struct lan78xx_net *dev)
1913 {
1914 	u32 addr_lo, addr_hi;
1915 	u8 addr[ETH_ALEN];
1916 	int ret;
1917 
1918 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1919 	if (ret < 0)
1920 		return ret;
1921 
1922 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1923 	if (ret < 0)
1924 		return ret;
1925 
1926 	addr[0] = addr_lo & 0xFF;
1927 	addr[1] = (addr_lo >> 8) & 0xFF;
1928 	addr[2] = (addr_lo >> 16) & 0xFF;
1929 	addr[3] = (addr_lo >> 24) & 0xFF;
1930 	addr[4] = addr_hi & 0xFF;
1931 	addr[5] = (addr_hi >> 8) & 0xFF;
1932 
1933 	if (!is_valid_ether_addr(addr)) {
1934 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1935 			/* valid address present in Device Tree */
1936 			netif_dbg(dev, ifup, dev->net,
1937 				  "MAC address read from Device Tree");
1938 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1939 						 ETH_ALEN, addr) == 0) ||
1940 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1941 					      ETH_ALEN, addr) == 0)) &&
1942 			   is_valid_ether_addr(addr)) {
1943 			/* eeprom values are valid so use them */
1944 			netif_dbg(dev, ifup, dev->net,
1945 				  "MAC address read from EEPROM");
1946 		} else {
1947 			/* generate random MAC */
1948 			eth_random_addr(addr);
1949 			netif_dbg(dev, ifup, dev->net,
1950 				  "MAC address set to random addr");
1951 		}
1952 
1953 		addr_lo = addr[0] | (addr[1] << 8) |
1954 			  (addr[2] << 16) | (addr[3] << 24);
1955 		addr_hi = addr[4] | (addr[5] << 8);
1956 
1957 		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1958 		if (ret < 0)
1959 			return ret;
1960 
1961 		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1962 		if (ret < 0)
1963 			return ret;
1964 	}
1965 
1966 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1967 	if (ret < 0)
1968 		return ret;
1969 
1970 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1971 	if (ret < 0)
1972 		return ret;
1973 
1974 	eth_hw_addr_set(dev->net, addr);
1975 
1976 	return 0;
1977 }
1978 
1979 /* MDIO read and write wrappers for phylib */
1980 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1981 {
1982 	struct lan78xx_net *dev = bus->priv;
1983 	u32 val, addr;
1984 	int ret;
1985 
1986 	ret = usb_autopm_get_interface(dev->intf);
1987 	if (ret < 0)
1988 		return ret;
1989 
1990 	mutex_lock(&dev->mdiobus_mutex);
1991 
1992 	/* confirm MII not busy */
1993 	ret = lan78xx_mdiobus_wait_not_busy(dev);
1994 	if (ret < 0)
1995 		goto done;
1996 
1997 	/* set the address, index & direction (read from PHY) */
1998 	addr = mii_access(phy_id, idx, MII_READ);
1999 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2000 	if (ret < 0)
2001 		goto done;
2002 
2003 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2004 	if (ret < 0)
2005 		goto done;
2006 
2007 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2008 	if (ret < 0)
2009 		goto done;
2010 
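	/* on success, return the 16-bit value read back in MII_DATA */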
2011 	ret = (int)(val & 0xFFFF);
2012 
2013 done:
2014 	mutex_unlock(&dev->mdiobus_mutex);
2015 	usb_autopm_put_interface(dev->intf);
2016 
2017 	return ret;
2018 }
2019 
2020 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2021 				 u16 regval)
2022 {
2023 	struct lan78xx_net *dev = bus->priv;
2024 	u32 val, addr;
2025 	int ret;
2026 
2027 	ret = usb_autopm_get_interface(dev->intf);
2028 	if (ret < 0)
2029 		return ret;
2030 
2031 	mutex_lock(&dev->mdiobus_mutex);
2032 
2033 	/* confirm MII not busy */
2034 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2035 	if (ret < 0)
2036 		goto done;
2037 
2038 	val = (u32)regval;
2039 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2040 	if (ret < 0)
2041 		goto done;
2042 
2043 	/* set the address, index & direction (write to PHY) */
2044 	addr = mii_access(phy_id, idx, MII_WRITE);
2045 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2046 	if (ret < 0)
2047 		goto done;
2048 
2049 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2050 	if (ret < 0)
2051 		goto done;
2052 
2053 done:
2054 	mutex_unlock(&dev->mdiobus_mutex);
2055 	usb_autopm_put_interface(dev->intf);
2056 	return ret;
2057 }
2058 
2059 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2060 {
2061 	struct device_node *node;
2062 	int ret;
2063 
2064 	dev->mdiobus = mdiobus_alloc();
2065 	if (!dev->mdiobus) {
2066 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2067 		return -ENOMEM;
2068 	}
2069 
2070 	dev->mdiobus->priv = (void *)dev;
2071 	dev->mdiobus->read = lan78xx_mdiobus_read;
2072 	dev->mdiobus->write = lan78xx_mdiobus_write;
2073 	dev->mdiobus->name = "lan78xx-mdiobus";
2074 	dev->mdiobus->parent = &dev->udev->dev;
2075 
2076 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2077 		 dev->udev->bus->busnum, dev->udev->devnum);
2078 
2079 	switch (dev->chipid) {
2080 	case ID_REV_CHIP_ID_7800_:
2081 	case ID_REV_CHIP_ID_7850_:
2082 		/* set to internal PHY id */
2083 		dev->mdiobus->phy_mask = ~(1 << 1);
2084 		break;
2085 	case ID_REV_CHIP_ID_7801_:
2086 		/* scan through PHYAD[2..0] */
2087 		dev->mdiobus->phy_mask = ~(0xFF);
2088 		break;
2089 	}
2090 
2091 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2092 	ret = of_mdiobus_register(dev->mdiobus, node);
2093 	of_node_put(node);
2094 	if (ret) {
2095 		netdev_err(dev->net, "can't register MDIO bus\n");
2096 		goto exit1;
2097 	}
2098 
2099 	netdev_dbg(dev->net, "registered MDIO bus %s\n", dev->mdiobus->id);
2100 	return 0;
2101 exit1:
2102 	mdiobus_free(dev->mdiobus);
2103 	return ret;
2104 }
2105 
2106 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2107 {
2108 	mdiobus_unregister(dev->mdiobus);
2109 	mdiobus_free(dev->mdiobus);
2110 }
2111 
2112 static int irq_map(struct irq_domain *d, unsigned int irq,
2113 		   irq_hw_number_t hwirq)
2114 {
2115 	struct irq_domain_data *data = d->host_data;
2116 
2117 	irq_set_chip_data(irq, data);
2118 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2119 	irq_set_noprobe(irq);
2120 
2121 	return 0;
2122 }
2123 
2124 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2125 {
2126 	irq_set_chip_and_handler(irq, NULL, NULL);
2127 	irq_set_chip_data(irq, NULL);
2128 }
2129 
2130 static const struct irq_domain_ops chip_domain_ops = {
2131 	.map	= irq_map,
2132 	.unmap	= irq_unmap,
2133 };
2134 
2135 static void lan78xx_irq_mask(struct irq_data *irqd)
2136 {
2137 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2138 
2139 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2140 }
2141 
2142 static void lan78xx_irq_unmask(struct irq_data *irqd)
2143 {
2144 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2145 
2146 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2147 }
2148 
2149 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2150 {
2151 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2152 
2153 	mutex_lock(&data->irq_lock);
2154 }
2155 
2156 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2157 {
2158 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2159 	struct lan78xx_net *dev =
2160 			container_of(data, struct lan78xx_net, domain_data);
2161 	u32 buf;
2162 	int ret;
2163 
2164 	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
2165 	 * are the only two callbacks executed in a non-atomic context.
2166 	 */
2167 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2168 	if (ret < 0)
2169 		goto irq_bus_sync_unlock;
2170 
2171 	if (buf != data->irqenable)
2172 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2173 
2174 irq_bus_sync_unlock:
2175 	if (ret < 0)
2176 		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
2177 			   ERR_PTR(ret));
2178 
2179 	mutex_unlock(&data->irq_lock);
2180 }
2181 
2182 static struct irq_chip lan78xx_irqchip = {
2183 	.name			= "lan78xx-irqs",
2184 	.irq_mask		= lan78xx_irq_mask,
2185 	.irq_unmask		= lan78xx_irq_unmask,
2186 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2187 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2188 };
2189 
2190 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2191 {
2192 	struct irq_domain *irqdomain;
2193 	unsigned int irqmap = 0;
2194 	u32 buf;
2195 	int ret = 0;
2196 
2197 	mutex_init(&dev->domain_data.irq_lock);
2198 
2199 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2200 	if (ret < 0)
2201 		return ret;
2202 
2203 	dev->domain_data.irqenable = buf;
2204 
2205 	dev->domain_data.irqchip = &lan78xx_irqchip;
2206 	dev->domain_data.irq_handler = handle_simple_irq;
2207 
2208 	irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
2209 					     &chip_domain_ops, &dev->domain_data);
2210 	if (irqdomain) {
2211 		/* create mapping for PHY interrupt */
2212 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2213 		if (!irqmap) {
2214 			irq_domain_remove(irqdomain);
2215 
2216 			irqdomain = NULL;
2217 			ret = -EINVAL;
2218 		}
2219 	} else {
2220 		ret = -EINVAL;
2221 	}
2222 
2223 	dev->domain_data.irqdomain = irqdomain;
2224 	dev->domain_data.phyirq = irqmap;
2225 
2226 	return ret;
2227 }
2228 
2229 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2230 {
2231 	if (dev->domain_data.phyirq > 0) {
2232 		irq_dispose_mapping(dev->domain_data.phyirq);
2233 
2234 		if (dev->domain_data.irqdomain)
2235 			irq_domain_remove(dev->domain_data.irqdomain);
2236 	}
2237 	dev->domain_data.phyirq = 0;
2238 	dev->domain_data.irqdomain = NULL;
2239 }
2240 
2241 static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode,
2242 			       const struct phylink_link_state *state)
2243 {
2244 	struct net_device *net = to_net_dev(config->dev);
2245 	struct lan78xx_net *dev = netdev_priv(net);
2246 	u32 mac_cr = 0;
2247 	int ret;
2248 
2249 	/* Check if the mode is supported */
2250 	if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) {
2251 		netdev_err(net, "Unsupported negotiation mode: %u\n", mode);
2252 		return;
2253 	}
2254 
2255 	switch (state->interface) {
2256 	case PHY_INTERFACE_MODE_GMII:
2257 		mac_cr |= MAC_CR_GMII_EN_;
2258 		break;
2259 	case PHY_INTERFACE_MODE_RGMII:
2260 	case PHY_INTERFACE_MODE_RGMII_ID:
2261 	case PHY_INTERFACE_MODE_RGMII_TXID:
2262 	case PHY_INTERFACE_MODE_RGMII_RXID:
2263 		break;
2264 	default:
2265 		netdev_warn(net, "Unsupported interface mode: %d\n",
2266 			    state->interface);
2267 		return;
2268 	}
2269 
2270 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr);
2271 	if (ret < 0)
2272 		netdev_err(net, "Failed to config MAC with error %pe\n",
2273 			   ERR_PTR(ret));
2274 }
2275 
2276 static void lan78xx_mac_link_down(struct phylink_config *config,
2277 				  unsigned int mode, phy_interface_t interface)
2278 {
2279 	struct net_device *net = to_net_dev(config->dev);
2280 	struct lan78xx_net *dev = netdev_priv(net);
2281 	int ret;
2282 
2283 	netif_stop_queue(net);
2284 
2285 	/* MAC reset will not de-assert TXEN/RXEN, we need to stop them
2286 	 * manually before reset. TX and RX should be disabled before running
2287 	 * link_up sequence.
2288 	 */
2289 	ret = lan78xx_stop_tx_path(dev);
2290 	if (ret < 0)
2291 		goto link_down_fail;
2292 
2293 	ret = lan78xx_stop_rx_path(dev);
2294 	if (ret < 0)
2295 		goto link_down_fail;
2296 
2297 	/* MAC reset does not seem to affect the MAC configuration. It is
2298 	 * unclear whether it is really needed, but it was done in previous
2299 	 * driver versions, so leave it here.
2300 	 */
2301 	ret = lan78xx_mac_reset(dev);
2302 	if (ret < 0)
2303 		goto link_down_fail;
2304 
2305 	return;
2306 
2307 link_down_fail:
2308 	netdev_err(dev->net, "Failed to set MAC down with error %pe\n",
2309 		   ERR_PTR(ret));
2310 }
2311 
2312 /**
2313  * lan78xx_configure_usb - Configure USB link power settings
2314  * @dev: pointer to the LAN78xx device structure
2315  * @speed: negotiated Ethernet link speed (in Mbps)
2316  *
2317  * This function configures U1/U2 link power management for SuperSpeed
2318  * USB devices based on the current Ethernet link speed. It uses the
2319  * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2320  *
2321  * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2322  *       LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2323  *
2324  * Return: 0 on success or a negative error code on failure.
2325  */
2326 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2327 {
2328 	u32 mask, val;
2329 	int ret;
2330 
2331 	/* Only configure USB settings for SuperSpeed devices */
2332 	if (dev->udev->speed != USB_SPEED_SUPER)
2333 		return 0;
2334 
2335 	/* LAN7850 does not support USB 3.x */
2336 	if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2337 		netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2338 		return 0;
2339 	}
2340 
2341 	switch (speed) {
2342 	case SPEED_1000:
2343 		/* Disable U2, enable U1 */
2344 		ret = lan78xx_update_reg(dev, USB_CFG1,
2345 					 USB_CFG1_DEV_U2_INIT_EN_, 0);
2346 		if (ret < 0)
2347 			return ret;
2348 
2349 		return lan78xx_update_reg(dev, USB_CFG1,
2350 					  USB_CFG1_DEV_U1_INIT_EN_,
2351 					  USB_CFG1_DEV_U1_INIT_EN_);
2352 
2353 	case SPEED_100:
2354 	case SPEED_10:
2355 		/* Enable both U1 and U2 */
2356 		mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2357 		val = mask;
2358 		return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2359 
2360 	default:
2361 		netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2362 		return -EINVAL;
2363 	}
2364 }
2365 
2366 /**
2367  * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2368  * @dev: pointer to the LAN78xx device structure
2369  * @tx_pause: enable transmission of pause frames
2370  * @rx_pause: enable reception of pause frames
2371  *
2372  * This function configures the LAN78xx flow control settings by writing
2373  * to the FLOW and FCT_FLOW registers. The pause time is set to the
2374  * maximum allowed value (65535 quanta). FIFO thresholds are selected
2375  * based on USB speed.
2376  *
2377  * The Pause Time field is measured in units of 512-bit times (quanta):
2378  *   - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2379  *   - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2380  *   - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2381  *
2382  * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2383  *   - RXUSED is the number of bytes used in the RX FIFO
2384  *   - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2385  *   - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2386  *   - Both thresholds are encoded in units of 512 bytes (rounded up)
2387  *
2388  * Thresholds differ by USB speed because available USB bandwidth
2389  * affects how fast packets can be drained from the RX FIFO:
2390  *   - USB 3.x (SuperSpeed):
2391  *       FLOW_ON  = 9216 bytes → 18 units
2392  *       FLOW_OFF = 4096 bytes →  8 units
2393  *   - USB 2.0 (High-Speed):
2394  *       FLOW_ON  = 8704 bytes → 17 units
2395  *       FLOW_OFF = 1024 bytes →  2 units
2396  *
2397  * Note: The FCT_FLOW register must be configured before enabling TX pause
2398  *       (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2399  *
2400  * Return: 0 on success or a negative error code on failure.
2401  */
2402 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2403 					 bool tx_pause, bool rx_pause)
2404 {
2405 	/* Use maximum pause time: 65535 quanta (512-bit times) */
2406 	const u32 pause_time_quanta = 65535;
2407 	u32 fct_flow = 0;
2408 	u32 flow = 0;
2409 	int ret;
2410 
2411 	/* Prepare MAC flow control bits */
2412 	if (tx_pause)
2413 		flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2414 
2415 	if (rx_pause)
2416 		flow |= FLOW_CR_RX_FCEN_;
2417 
2418 	/* Select RX FIFO thresholds based on USB speed
2419 	 *
2420 	 * FCT_FLOW layout:
2421 	 *   bits [6:0]   FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2422 	 *   bits [14:8]  FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2423 	 *   thresholds are expressed in units of 512 bytes
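	 *
	 * Worked example, using the threshold values described above: at
	 * SuperSpeed, FLOW_ON 9216 B -> 18 units and FLOW_OFF 4096 B ->
	 * 8 units, so fct_flow encodes as 0x0812.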
2424 	 */
2425 	switch (dev->udev->speed) {
2426 	case USB_SPEED_SUPER:
2427 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2428 		break;
2429 	case USB_SPEED_HIGH:
2430 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2431 		break;
2432 	default:
2433 		netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2434 			    dev->udev->speed);
2435 		return -EINVAL;
2436 	}
2437 
2438 	/* Step 1: Write FIFO thresholds before enabling pause frames */
2439 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2440 	if (ret < 0)
2441 		return ret;
2442 
2443 	/* Step 2: Enable MAC pause functionality */
2444 	return lan78xx_write_reg(dev, FLOW, flow);
2445 }
2446 
2447 static void lan78xx_mac_link_up(struct phylink_config *config,
2448 				struct phy_device *phy,
2449 				unsigned int mode, phy_interface_t interface,
2450 				int speed, int duplex,
2451 				bool tx_pause, bool rx_pause)
2452 {
2453 	struct net_device *net = to_net_dev(config->dev);
2454 	struct lan78xx_net *dev = netdev_priv(net);
2455 	u32 mac_cr = 0;
2456 	int ret;
2457 
2458 	switch (speed) {
2459 	case SPEED_1000:
2460 		mac_cr |= MAC_CR_SPEED_1000_;
2461 		break;
2462 	case SPEED_100:
2463 		mac_cr |= MAC_CR_SPEED_100_;
2464 		break;
2465 	case SPEED_10:
2466 		mac_cr |= MAC_CR_SPEED_10_;
2467 		break;
2468 	default:
2469 		netdev_err(dev->net, "Unsupported speed %d\n", speed);
2470 		return;
2471 	}
2472 
2473 	if (duplex == DUPLEX_FULL)
2474 		mac_cr |= MAC_CR_FULL_DUPLEX_;
2475 
2476 	/* make sure TXEN and RXEN are disabled before reconfiguring MAC */
2477 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ |
2478 				 MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr);
2479 	if (ret < 0)
2480 		goto link_up_fail;
2481 
2482 	ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause);
2483 	if (ret < 0)
2484 		goto link_up_fail;
2485 
2486 	ret = lan78xx_configure_usb(dev, speed);
2487 	if (ret < 0)
2488 		goto link_up_fail;
2489 
2490 	lan78xx_rx_urb_submit_all(dev);
2491 
2492 	ret = lan78xx_flush_rx_fifo(dev);
2493 	if (ret < 0)
2494 		goto link_up_fail;
2495 
2496 	ret = lan78xx_flush_tx_fifo(dev);
2497 	if (ret < 0)
2498 		goto link_up_fail;
2499 
2500 	ret = lan78xx_start_tx_path(dev);
2501 	if (ret < 0)
2502 		goto link_up_fail;
2503 
2504 	ret = lan78xx_start_rx_path(dev);
2505 	if (ret < 0)
2506 		goto link_up_fail;
2507 
2508 	netif_start_queue(net);
2509 
2510 	return;
2511 
2512 link_up_fail:
2513 	netdev_err(dev->net, "Failed to set MAC up with error %pe\n",
2514 		   ERR_PTR(ret));
2515 }
2516 
2517 /**
2518  * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support
2519  * @dev: LAN78xx device
2520  * @enable: true to enable EEE, false to disable
2521  *
2522  * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy
2523  * Efficient Ethernet (EEE) operation. According to current understanding
2524  * of the LAN7800 documentation, this bit can be modified while TX and RX
2525  * are enabled. No explicit requirement was found to disable data paths
2526  * before changing this bit.
2527  *
2528  * Return: 0 on success or a negative error code
2529  */
2530 static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable)
2531 {
2532 	u32 mac_cr = 0;
2533 
2534 	if (enable)
2535 		mac_cr |= MAC_CR_EEE_EN_;
2536 
2537 	return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr);
2538 }
2539 
2540 static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config)
2541 {
2542 	struct net_device *net = to_net_dev(config->dev);
2543 	struct lan78xx_net *dev = netdev_priv(net);
2544 
2545 	lan78xx_mac_eee_enable(dev, false);
2546 }
2547 
2548 static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
2549 				     bool tx_clk_stop)
2550 {
2551 	struct net_device *net = to_net_dev(config->dev);
2552 	struct lan78xx_net *dev = netdev_priv(net);
2553 	int ret;
2554 
2555 	/* Software should only change this field when Energy Efficient
2556 	 * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
2557 	 * EEEEN during probe, and phylink itself guarantees that
2558 	 * mac_disable_tx_lpi() will have been previously called.
2559 	 */
2560 	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer);
2561 	if (ret < 0)
2562 		return ret;
2563 
2564 	return lan78xx_mac_eee_enable(dev, true);
2565 }
2566 
2567 static const struct phylink_mac_ops lan78xx_phylink_mac_ops = {
2568 	.mac_config = lan78xx_mac_config,
2569 	.mac_link_down = lan78xx_mac_link_down,
2570 	.mac_link_up = lan78xx_mac_link_up,
2571 	.mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi,
2572 	.mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi,
2573 };
2574 
2575 /**
2576  * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801
2577  * @dev: LAN78xx device
2578  *
2579  * Use fixed link configuration with 1 Gbps full duplex. This is used in special
2580  * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface
2581  * to a switch without a visible PHY.
2582  *
2583  * Return: 0 on success or a negative error code.
2584  */
2585 static int lan78xx_set_fixed_link(struct lan78xx_net *dev)
2586 {
2587 	static const struct phylink_link_state state = {
2588 		.speed = SPEED_1000,
2589 		.duplex = DUPLEX_FULL,
2590 	};
2591 
2592 	netdev_info(dev->net,
2593 		    "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n");
2594 
2595 	return phylink_set_fixed_link(dev->phylink, &state);
2596 }
2597 
2598 /**
2599  * lan78xx_get_phy() - Probe or register PHY device and set interface mode
2600  * @dev: LAN78xx device structure
2601  *
2602  * This function attempts to find a PHY on the MDIO bus. If no PHY is found
2603  * and the chip is LAN7801, it registers a fixed PHY as fallback. It also
2604  * sets dev->interface based on chip ID and detected PHY type.
2605  *
2606  * Return: a valid PHY device pointer, or ERR_PTR() on failure.
2607  */
2608 static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
2609 {
2610 	struct phy_device *phydev;
2611 
2612 	/* Attempt to locate a PHY on the MDIO bus */
2613 	phydev = phy_find_first(dev->mdiobus);
2614 
2615 	switch (dev->chipid) {
2616 	case ID_REV_CHIP_ID_7801_:
2617 		if (phydev) {
2618 			/* External RGMII PHY detected */
2619 			dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2620 			phydev->is_internal = false;
2621 
2622 			if (!phydev->drv)
2623 				netdev_warn(dev->net,
2624 					    "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
2625 
2626 			return phydev;
2627 		}
2628 
2629 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2630 		/* No PHY found – fall back to a fixed link (e.g. KSZ switch board) */
2631 		return NULL;
2632 
2633 	case ID_REV_CHIP_ID_7800_:
2634 	case ID_REV_CHIP_ID_7850_:
2635 		if (!phydev)
2636 			return ERR_PTR(-ENODEV);
2637 
2638 		/* These use internal GMII-connected PHY */
2639 		dev->interface = PHY_INTERFACE_MODE_GMII;
2640 		phydev->is_internal = true;
2641 		return phydev;
2642 
2643 	default:
2644 		netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
2645 		return ERR_PTR(-ENODEV);
2646 	}
2647 }
2648 
2649 /**
2650  * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
2651  * @dev: LAN78xx device
2652  *
2653  * Configure MAC-side registers according to dev->interface, which should be
2654  * set by lan78xx_get_phy().
2655  *
2656  * - For PHY_INTERFACE_MODE_RGMII:
2657  *   Enable MAC-side TXC delay. This mode seems to be used in a special setup
2658  *   without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
2659  *   connected to the KSZ9897 switch, and the link timing is expected to be
2660  *   hardwired (e.g. via strapping or board layout). No devicetree support is
2661  *   assumed here.
2662  *
2663  * - For PHY_INTERFACE_MODE_RGMII_ID:
2664  *   Disable MAC-side delay and rely on the PHY driver to provide delay.
2665  *
2666  * - For GMII, no MAC-specific config is needed.
2667  *
2668  * Return: 0 on success or a negative error code.
2669  */
2670 static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
2671 {
2672 	int ret;
2673 
2674 	switch (dev->interface) {
2675 	case PHY_INTERFACE_MODE_RGMII:
2676 		/* Enable MAC-side TX clock delay */
2677 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2678 					MAC_RGMII_ID_TXC_DELAY_EN_);
2679 		if (ret < 0)
2680 			return ret;
2681 
2682 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2683 		if (ret < 0)
2684 			return ret;
2685 
2686 		ret = lan78xx_update_reg(dev, HW_CFG,
2687 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
2688 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
2689 		if (ret < 0)
2690 			return ret;
2691 
2692 		break;
2693 
2694 	case PHY_INTERFACE_MODE_RGMII_ID:
2695 		/* Disable MAC-side TXC delay, PHY provides it */
2696 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2697 		if (ret < 0)
2698 			return ret;
2699 
2700 		break;
2701 
2702 	case PHY_INTERFACE_MODE_GMII:
2703 		/* No MAC-specific configuration required */
2704 		break;
2705 
2706 	default:
2707 		netdev_warn(dev->net, "Unsupported interface mode: %d\n",
2708 			    dev->interface);
2709 		break;
2710 	}
2711 
2712 	return 0;
2713 }
2714 
2715 /**
2716  * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2717  * @dev: LAN78xx device
2718  * @phydev: PHY device (must be valid)
2719  *
2720  * Reads "microchip,led-modes" property from the PHY's DT node and enables
2721  * the corresponding number of LEDs by writing to HW_CFG.
2722  *
2723  * This helper preserves the original logic, enabling up to 4 LEDs.
2724  * If the property is not present, this function does nothing.
2725  *
2726  * Return: 0 on success or a negative error code.
2727  */
2728 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2729 					  struct phy_device *phydev)
2730 {
2731 	struct device_node *np = phydev->mdio.dev.of_node;
2732 	u32 reg;
2733 	int len, ret;
2734 
2735 	if (!np)
2736 		return 0;
2737 
2738 	len = of_property_count_elems_of_size(np, "microchip,led-modes",
2739 					      sizeof(u32));
2740 	if (len < 0)
2741 		return 0;
2742 
2743 	ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2744 	if (ret < 0)
2745 		return ret;
2746 
2747 	reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2748 		 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2749 
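	/* Each (len > n) comparison evaluates to 0 or 1, so the multiply
	 * sets the LEDn enable bit only when more than n LED modes are
	 * listed in the property.
	 */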
2750 	reg |= (len > 0) * HW_CFG_LED0_EN_ |
2751 	       (len > 1) * HW_CFG_LED1_EN_ |
2752 	       (len > 2) * HW_CFG_LED2_EN_ |
2753 	       (len > 3) * HW_CFG_LED3_EN_;
2754 
2755 	return lan78xx_write_reg(dev, HW_CFG, reg);
2756 }
2757 
2758 static int lan78xx_phylink_setup(struct lan78xx_net *dev)
2759 {
2760 	struct phylink_config *pc = &dev->phylink_config;
2761 	struct phylink *phylink;
2762 
2763 	pc->dev = &dev->net->dev;
2764 	pc->type = PHYLINK_NETDEV;
2765 	pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 |
2766 			       MAC_100 | MAC_1000FD;
2767 	pc->mac_managed_pm = true;
2768 	pc->lpi_capabilities = MAC_100FD | MAC_1000FD;
2769 	/* Default TX LPI (Low Power Idle) request delay count is set to 50us.
2771 	 *
2772 	 * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204.
2773 	 *
2774 	 * Reasoning:
2775 	 * According to the application note in the LAN7800 documentation, a
2776 	 * zero delay may negatively impact the TX data path’s ability to
2777 	 * support Gigabit operation. A value of 50us is recommended as a
2778 	 * reasonable default when the part operates at Gigabit speeds,
2779 	 * balancing stability and power efficiency in EEE mode. This delay can
2780 	 * be increased based on performance testing, as EEE is designed for
2781 	 * scenarios with mostly idle links and occasional bursts of full
2782 	 * bandwidth transmission. The goal is to ensure reliable Gigabit
2783 	 * performance without overly aggressive power optimization during
2784 	 * inactive periods.
2785 	 */
2786 	pc->lpi_timer_default = 50;
2787 	pc->eee_enabled_default = true;
2788 
2789 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2790 		phy_interface_set_rgmii(pc->supported_interfaces);
2791 	else
2792 		__set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces);
2793 
2794 	memcpy(dev->phylink_config.lpi_interfaces,
2795 	       dev->phylink_config.supported_interfaces,
2796 	       sizeof(dev->phylink_config.lpi_interfaces));
2797 
2798 	phylink = phylink_create(pc, dev->net->dev.fwnode,
2799 				 dev->interface, &lan78xx_phylink_mac_ops);
2800 	if (IS_ERR(phylink))
2801 		return PTR_ERR(phylink);
2802 
2803 	dev->phylink = phylink;
2804 
2805 	return 0;
2806 }
2807 
2808 static void lan78xx_phy_uninit(struct lan78xx_net *dev)
2809 {
2810 	if (dev->phylink) {
2811 		phylink_disconnect_phy(dev->phylink);
2812 		phylink_destroy(dev->phylink);
2813 		dev->phylink = NULL;
2814 	}
2815 }
2816 
2817 static int lan78xx_phy_init(struct lan78xx_net *dev)
2818 {
2819 	struct phy_device *phydev;
2820 	int ret;
2821 
2822 	phydev = lan78xx_get_phy(dev);
2823 	/* phydev can be NULL if no PHY is found and the chip is LAN7801,
2824 	 * which will use a fixed link later.
2825 	 * If an error occurs, return the error code immediately.
2826 	 */
2827 	if (IS_ERR(phydev))
2828 		return PTR_ERR(phydev);
2829 
2830 	ret = lan78xx_phylink_setup(dev);
2831 	if (ret < 0)
2832 		return ret;
2833 
2834 	ret = lan78xx_mac_prepare_for_phy(dev);
2835 	if (ret < 0)
2836 		goto phylink_uninit;
2837 
2838 	/* If no PHY is found, set up a fixed link. It is very specific to
2839 	 * the LAN7801 and is used in special cases like EVB-KSZ9897-1 where
2840 	 * LAN7801 acts as a USB-to-Ethernet interface to a switch without
2841 	 * a visible PHY.
2842 	 */
2843 	if (!phydev) {
2844 		ret = lan78xx_set_fixed_link(dev);
2845 		if (ret < 0)
2846 			goto phylink_uninit;
2847 
2848 		/* No PHY found, so set up a fixed link and return early.
2849 		 * No need to configure PHY IRQ or attach to phylink.
2850 		 */
2851 		return 0;
2852 	}
2853 
2854 	/* if phyirq is not set, use polling mode in phylib */
2855 	if (dev->domain_data.phyirq > 0)
2856 		phydev->irq = dev->domain_data.phyirq;
2857 	else
2858 		phydev->irq = PHY_POLL;
2859 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2860 
2861 	ret = phylink_connect_phy(dev->phylink, phydev);
2862 	if (ret) {
2863 		netdev_err(dev->net, "can't attach PHY to %s, error %pe\n",
2864 			   dev->mdiobus->id, ERR_PTR(ret));
2865 		goto phylink_uninit;
2866 	}
2867 
2868 	ret = lan78xx_configure_leds_from_dt(dev, phydev);
2869 	if (ret < 0)
2870 		goto phylink_uninit;
2871 
2872 	return 0;
2873 
2874 phylink_uninit:
2875 	lan78xx_phy_uninit(dev);
2876 
2877 	return ret;
2878 }
2879 
2880 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2881 {
2882 	bool rxenabled;
2883 	u32 buf;
2884 	int ret;
2885 
2886 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2887 	if (ret < 0)
2888 		return ret;
2889 
2890 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2891 
2892 	if (rxenabled) {
2893 		buf &= ~MAC_RX_RXEN_;
2894 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2895 		if (ret < 0)
2896 			return ret;
2897 	}
2898 
2899 	/* add 4 to size for FCS */
2900 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2901 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2902 
2903 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2904 	if (ret < 0)
2905 		return ret;
2906 
2907 	if (rxenabled) {
2908 		buf |= MAC_RX_RXEN_;
2909 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2910 		if (ret < 0)
2911 			return ret;
2912 	}
2913 
2914 	return 0;
2915 }
2916 
2917 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2918 {
2919 	struct sk_buff *skb;
2920 	unsigned long flags;
2921 	int count = 0;
2922 
2923 	spin_lock_irqsave(&q->lock, flags);
2924 	while (!skb_queue_empty(q)) {
2925 		struct skb_data	*entry;
2926 		struct urb *urb;
2927 		int ret;
2928 
2929 		skb_queue_walk(q, skb) {
2930 			entry = (struct skb_data *)skb->cb;
2931 			if (entry->state != unlink_start)
2932 				goto found;
2933 		}
2934 		break;
2935 found:
2936 		entry->state = unlink_start;
2937 		urb = entry->urb;
2938 
2939 		/* Take a reference on the URB so it cannot be freed
2940 		 * while usb_unlink_urb() runs; usb_unlink_urb() always
2941 		 * races with the .complete handler (including defer_bh),
2942 		 * which could otherwise trigger a use-after-free inside
2943 		 * usb_unlink_urb().
2944 		 */
2945 		usb_get_urb(urb);
2946 		spin_unlock_irqrestore(&q->lock, flags);
2947 		/* during some PM-driven resume scenarios,
2948 		 * these (async) unlinks complete immediately
2949 		 */
2950 		ret = usb_unlink_urb(urb);
2951 		if (ret != -EINPROGRESS && ret != 0)
2952 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2953 		else
2954 			count++;
2955 		usb_put_urb(urb);
2956 		spin_lock_irqsave(&q->lock, flags);
2957 	}
2958 	spin_unlock_irqrestore(&q->lock, flags);
2959 	return count;
2960 }
2961 
2962 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2963 {
2964 	struct lan78xx_net *dev = netdev_priv(netdev);
2965 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2966 	int ret;
2967 
2968 	/* no second zero-length packet read wanted after mtu-sized packets */
2969 	if ((max_frame_len % dev->maxpacket) == 0)
2970 		return -EDOM;
2971 
2972 	ret = usb_autopm_get_interface(dev->intf);
2973 	if (ret < 0)
2974 		return ret;
2975 
2976 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2977 	if (ret < 0)
2978 		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2979 			   new_mtu, netdev->mtu, ERR_PTR(ret));
2980 	else
2981 		WRITE_ONCE(netdev->mtu, new_mtu);
2982 
2983 	usb_autopm_put_interface(dev->intf);
2984 
2985 	return ret;
2986 }
2987 
2988 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2989 {
2990 	struct lan78xx_net *dev = netdev_priv(netdev);
2991 	struct sockaddr *addr = p;
2992 	u32 addr_lo, addr_hi;
2993 	int ret;
2994 
2995 	if (netif_running(netdev))
2996 		return -EBUSY;
2997 
2998 	if (!is_valid_ether_addr(addr->sa_data))
2999 		return -EADDRNOTAVAIL;
3000 
3001 	eth_hw_addr_set(netdev, addr->sa_data);
3002 
3003 	addr_lo = netdev->dev_addr[0] |
3004 		  netdev->dev_addr[1] << 8 |
3005 		  netdev->dev_addr[2] << 16 |
3006 		  netdev->dev_addr[3] << 24;
3007 	addr_hi = netdev->dev_addr[4] |
3008 		  netdev->dev_addr[5] << 8;
3009 
3010 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
3011 	if (ret < 0)
3012 		return ret;
3013 
3014 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
3015 	if (ret < 0)
3016 		return ret;
3017 
3018 	/* update the perfect-match address filter (MAF) with the new address */
3019 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
3020 	if (ret < 0)
3021 		return ret;
3022 
3023 	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
3024 }
3025 
3026 /* Enable or disable Rx checksum offload engine */
3027 static int lan78xx_set_features(struct net_device *netdev,
3028 				netdev_features_t features)
3029 {
3030 	struct lan78xx_net *dev = netdev_priv(netdev);
3031 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3032 	unsigned long flags;
3033 
3034 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3035 
3036 	if (features & NETIF_F_RXCSUM) {
3037 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3038 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3039 	} else {
3040 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3041 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3042 	}
3043 
3044 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
3045 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3046 	else
3047 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3048 
3049 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3050 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3051 	else
3052 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3053 
3054 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3055 
3056 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3057 }
3058 
3059 static void lan78xx_deferred_vlan_write(struct work_struct *param)
3060 {
3061 	struct lan78xx_priv *pdata =
3062 			container_of(param, struct lan78xx_priv, set_vlan);
3063 	struct lan78xx_net *dev = pdata->dev;
3064 
3065 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
3066 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
3067 }
3068 
3069 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
3070 				   __be16 proto, u16 vid)
3071 {
3072 	struct lan78xx_net *dev = netdev_priv(netdev);
3073 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3074 	u16 vid_bit_index;
3075 	u16 vid_dword_index;
3076 
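	/* The 4096 possible VIDs are tracked in a bitmap of 128 32-bit
	 * words: e.g. VID 100 lands in word 3, bit 4.
	 */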
3077 	vid_dword_index = (vid >> 5) & 0x7F;
3078 	vid_bit_index = vid & 0x1F;
3079 
3080 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
3081 
3082 	/* defer register writes to a sleepable context */
3083 	schedule_work(&pdata->set_vlan);
3084 
3085 	return 0;
3086 }
3087 
3088 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
3089 				    __be16 proto, u16 vid)
3090 {
3091 	struct lan78xx_net *dev = netdev_priv(netdev);
3092 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3093 	u16 vid_bit_index;
3094 	u16 vid_dword_index;
3095 
3096 	vid_dword_index = (vid >> 5) & 0x7F;
3097 	vid_bit_index = vid & 0x1F;
3098 
3099 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
3100 
3101 	/* defer register writes to a sleepable context */
3102 	schedule_work(&pdata->set_vlan);
3103 
3104 	return 0;
3105 }
3106 
3107 static int lan78xx_init_ltm(struct lan78xx_net *dev)
3108 {
3109 	u32 regs[6] = { 0 };
3110 	int ret;
3111 	u32 buf;
3112 
3113 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
3114 	if (ret < 0)
3115 		goto init_ltm_failed;
3116 
3117 	if (buf & USB_CFG1_LTM_ENABLE_) {
3118 		u8 temp[2];
3119 		/* Get values from EEPROM first */
3120 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
3121 			if (temp[0] == 24) {
3122 				ret = lan78xx_read_raw_eeprom(dev,
3123 							      temp[1] * 2,
3124 							      24,
3125 							      (u8 *)regs);
3126 				if (ret < 0)
3127 					return ret;
3128 			}
3129 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
3130 			if (temp[0] == 24) {
3131 				ret = lan78xx_read_raw_otp(dev,
3132 							   temp[1] * 2,
3133 							   24,
3134 							   (u8 *)regs);
3135 				if (ret < 0)
3136 					return ret;
3137 			}
3138 		}
3139 	}
3140 
3141 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
3142 	if (ret < 0)
3143 		goto init_ltm_failed;
3144 
3145 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
3146 	if (ret < 0)
3147 		goto init_ltm_failed;
3148 
3149 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
3150 	if (ret < 0)
3151 		goto init_ltm_failed;
3152 
3153 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
3154 	if (ret < 0)
3155 		goto init_ltm_failed;
3156 
3157 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
3158 	if (ret < 0)
3159 		goto init_ltm_failed;
3160 
3161 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
3162 	if (ret < 0)
3163 		goto init_ltm_failed;
3164 
3165 	return 0;
3166 
3167 init_ltm_failed:
3168 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
3169 	return ret;
3170 }
3171 
3172 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3173 {
3174 	int result = 0;
3175 
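	/* URB sizes, counts and burst parameters scale with the USB bus
	 * speed; the burst cap is expressed in units of the bus packet
	 * size, as the divisions below show.
	 */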
3176 	switch (dev->udev->speed) {
3177 	case USB_SPEED_SUPER:
3178 		dev->rx_urb_size = RX_SS_URB_SIZE;
3179 		dev->tx_urb_size = TX_SS_URB_SIZE;
3180 		dev->n_rx_urbs = RX_SS_URB_NUM;
3181 		dev->n_tx_urbs = TX_SS_URB_NUM;
3182 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
3183 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3184 		break;
3185 	case USB_SPEED_HIGH:
3186 		dev->rx_urb_size = RX_HS_URB_SIZE;
3187 		dev->tx_urb_size = TX_HS_URB_SIZE;
3188 		dev->n_rx_urbs = RX_HS_URB_NUM;
3189 		dev->n_tx_urbs = TX_HS_URB_NUM;
3190 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
3191 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3192 		break;
3193 	case USB_SPEED_FULL:
3194 		dev->rx_urb_size = RX_FS_URB_SIZE;
3195 		dev->tx_urb_size = TX_FS_URB_SIZE;
3196 		dev->n_rx_urbs = RX_FS_URB_NUM;
3197 		dev->n_tx_urbs = TX_FS_URB_NUM;
3198 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
3199 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3200 		break;
3201 	default:
3202 		netdev_warn(dev->net, "USB bus speed not supported\n");
3203 		result = -EIO;
3204 		break;
3205 	}
3206 
3207 	return result;
3208 }
3209 
3210 static int lan78xx_reset(struct lan78xx_net *dev)
3211 {
3212 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3213 	unsigned long timeout;
3214 	int ret;
3215 	u32 buf;
3216 
3217 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3218 	if (ret < 0)
3219 		return ret;
3220 
3221 	buf |= HW_CFG_LRST_;
3222 
3223 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3224 	if (ret < 0)
3225 		return ret;
3226 
3227 	timeout = jiffies + HZ;
3228 	do {
3229 		mdelay(1);
3230 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3231 		if (ret < 0)
3232 			return ret;
3233 
3234 		if (time_after(jiffies, timeout)) {
3235 			netdev_warn(dev->net,
3236 				    "timeout on completion of LiteReset");
3237 			ret = -ETIMEDOUT;
3238 			return ret;
3239 		}
3240 	} while (buf & HW_CFG_LRST_);
3241 
3242 	ret = lan78xx_init_mac_address(dev);
3243 	if (ret < 0)
3244 		return ret;
3245 
3246 	/* save DEVID for later usage */
3247 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
3248 	if (ret < 0)
3249 		return ret;
3250 
3251 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
3252 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
3253 
3254 	/* Respond to the IN token with a NAK */
3255 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3256 	if (ret < 0)
3257 		return ret;
3258 
3259 	buf |= USB_CFG_BIR_;
3260 
3261 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3262 	if (ret < 0)
3263 		return ret;
3264 
3265 	/* Init LTM */
3266 	ret = lan78xx_init_ltm(dev);
3267 	if (ret < 0)
3268 		return ret;
3269 
3270 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
3271 	if (ret < 0)
3272 		return ret;
3273 
3274 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
3275 	if (ret < 0)
3276 		return ret;
3277 
3278 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3279 	if (ret < 0)
3280 		return ret;
3281 
3282 	buf |= HW_CFG_MEF_;
3283 	buf |= HW_CFG_CLK125_EN_;
3284 	buf |= HW_CFG_REFCLK25_EN_;
3285 
3286 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3287 	if (ret < 0)
3288 		return ret;
3289 
3290 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3291 	if (ret < 0)
3292 		return ret;
3293 
3294 	buf |= USB_CFG_BCE_;
3295 
3296 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3297 	if (ret < 0)
3298 		return ret;
3299 
3300 	/* set FIFO sizes */
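	/* The FIFO end registers are written in 512-byte units; with a
	 * 12 KiB FIFO this is (12288 - 512) / 512 = 23.
	 */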
3301 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
3302 
3303 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
3304 	if (ret < 0)
3305 		return ret;
3306 
3307 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
3308 
3309 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
3310 	if (ret < 0)
3311 		return ret;
3312 
3313 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3314 	if (ret < 0)
3315 		return ret;
3316 
3317 	ret = lan78xx_write_reg(dev, FLOW, 0);
3318 	if (ret < 0)
3319 		return ret;
3320 
3321 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3322 	if (ret < 0)
3323 		return ret;
3324 
3325 	/* Don't need rfe_ctl_lock during initialisation */
3326 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3327 	if (ret < 0)
3328 		return ret;
3329 
3330 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3331 
3332 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3333 	if (ret < 0)
3334 		return ret;
3335 
3336 	/* Enable or disable checksum offload engines */
3337 	ret = lan78xx_set_features(dev->net, dev->net->features);
3338 	if (ret < 0)
3339 		return ret;
3340 
3341 	lan78xx_set_multicast(dev->net);
3342 
3343 	/* reset PHY */
3344 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3345 	if (ret < 0)
3346 		return ret;
3347 
3348 	buf |= PMT_CTL_PHY_RST_;
3349 
3350 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3351 	if (ret < 0)
3352 		return ret;
3353 
3354 	timeout = jiffies + HZ;
3355 	do {
3356 		mdelay(1);
3357 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3358 		if (ret < 0)
3359 			return ret;
3360 
3361 		if (time_after(jiffies, timeout)) {
3362 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3363 			ret = -ETIMEDOUT;
3364 			return ret;
3365 		}
3366 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3367 
3368 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3369 	if (ret < 0)
3370 		return ret;
3371 
3372 	buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_);
3373 
3374 	/* LAN7801 only has RGMII mode */
3375 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
3376 		buf &= ~MAC_CR_GMII_EN_;
3377 
3378 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3379 	if (ret < 0)
3380 		return ret;
3381 
3382 	ret = lan78xx_set_rx_max_frame_length(dev,
3383 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3384 
3385 	return ret;
3386 }
3387 
3388 static void lan78xx_init_stats(struct lan78xx_net *dev)
3389 {
3390 	u32 *p;
3391 	int i;
3392 
3393 	/* initialize for stats update
3394 	 * some counters are 20 bits wide and some are 32 bits
3395 	 */
3396 	p = (u32 *)&dev->stats.rollover_max;
3397 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3398 		p[i] = 0xFFFFF;
3399 
3400 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3401 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3402 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3403 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3404 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3405 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3406 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3407 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3408 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3409 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3410 
3411 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3412 }
3413 
3414 static int lan78xx_open(struct net_device *net)
3415 {
3416 	struct lan78xx_net *dev = netdev_priv(net);
3417 	int ret;
3418 
3419 	netif_dbg(dev, ifup, dev->net, "open device");
3420 
3421 	ret = usb_autopm_get_interface(dev->intf);
3422 	if (ret < 0)
3423 		return ret;
3424 
3425 	mutex_lock(&dev->dev_mutex);
3426 
3427 	lan78xx_init_stats(dev);
3428 
3429 	napi_enable(&dev->napi);
3430 
3431 	set_bit(EVENT_DEV_OPEN, &dev->flags);
3432 
3433 	/* for Link Check */
3434 	if (dev->urb_intr) {
3435 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3436 		if (ret < 0) {
3437 			netif_err(dev, ifup, dev->net,
3438 				  "intr submit %d\n", ret);
3439 			goto done;
3440 		}
3441 	}
3442 
3443 	phylink_start(dev->phylink);
3444 
3445 done:
3446 	mutex_unlock(&dev->dev_mutex);
3447 
3448 	if (ret < 0)
3449 		usb_autopm_put_interface(dev->intf);
3450 
3451 	return ret;
3452 }
3453 
3454 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3455 {
3456 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3457 	DECLARE_WAITQUEUE(wait, current);
3458 	int temp;
3459 
3460 	/* ensure there are no more active urbs */
3461 	add_wait_queue(&unlink_wakeup, &wait);
3462 	set_current_state(TASK_UNINTERRUPTIBLE);
3463 	dev->wait = &unlink_wakeup;
3464 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3465 
3466 	/* maybe wait for deletions to finish. */
3467 	while (!skb_queue_empty(&dev->rxq) ||
3468 	       !skb_queue_empty(&dev->txq)) {
3469 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3470 		set_current_state(TASK_UNINTERRUPTIBLE);
3471 		netif_dbg(dev, ifdown, dev->net,
3472 			  "waited for %d urb completions", temp);
3473 	}
3474 	set_current_state(TASK_RUNNING);
3475 	dev->wait = NULL;
3476 	remove_wait_queue(&unlink_wakeup, &wait);
3477 
3478 	/* empty Rx done, Rx overflow and Tx pend queues
3479 	 */
3480 	while (!skb_queue_empty(&dev->rxq_done)) {
3481 		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3482 
3483 		lan78xx_release_rx_buf(dev, skb);
3484 	}
3485 
3486 	skb_queue_purge(&dev->rxq_overflow);
3487 	skb_queue_purge(&dev->txq_pend);
3488 }
3489 
3490 static int lan78xx_stop(struct net_device *net)
3491 {
3492 	struct lan78xx_net *dev = netdev_priv(net);
3493 
3494 	netif_dbg(dev, ifup, dev->net, "stop device");
3495 
3496 	mutex_lock(&dev->dev_mutex);
3497 
3498 	if (timer_pending(&dev->stat_monitor))
3499 		timer_delete_sync(&dev->stat_monitor);
3500 
3501 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3502 	napi_disable(&dev->napi);
3503 
3504 	lan78xx_terminate_urbs(dev);
3505 
3506 	netif_info(dev, ifdown, dev->net,
3507 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3508 		   net->stats.rx_packets, net->stats.tx_packets,
3509 		   net->stats.rx_errors, net->stats.tx_errors);
3510 
3511 	phylink_stop(dev->phylink);
3512 
3513 	usb_kill_urb(dev->urb_intr);
3514 
3515 	/* deferred work (task, timer, softirq) must also stop.
3516 	 * can't flush_scheduled_work() until we drop rtnl (later),
3517 	 * else workers could deadlock; so make workers a NOP.
3518 	 */
3519 	clear_bit(EVENT_TX_HALT, &dev->flags);
3520 	clear_bit(EVENT_RX_HALT, &dev->flags);
3521 	clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
3522 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3523 
3524 	cancel_delayed_work_sync(&dev->wq);
3525 
3526 	usb_autopm_put_interface(dev->intf);
3527 
3528 	mutex_unlock(&dev->dev_mutex);
3529 
3530 	return 0;
3531 }
3532 
3533 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3534 			       struct sk_buff_head *list, enum skb_state state)
3535 {
3536 	unsigned long flags;
3537 	enum skb_state old_state;
3538 	struct skb_data *entry = (struct skb_data *)skb->cb;
3539 
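	/* Hand the skb from @list to rxq_done inside one IRQ-disabled
	 * region: the source lock is dropped and the destination lock
	 * taken without re-enabling interrupts in between.
	 */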
3540 	spin_lock_irqsave(&list->lock, flags);
3541 	old_state = entry->state;
3542 	entry->state = state;
3543 
3544 	__skb_unlink(skb, list);
3545 	spin_unlock(&list->lock);
3546 	spin_lock(&dev->rxq_done.lock);
3547 
3548 	__skb_queue_tail(&dev->rxq_done, skb);
3549 	if (skb_queue_len(&dev->rxq_done) == 1)
3550 		napi_schedule(&dev->napi);
3551 
3552 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3553 
3554 	return old_state;
3555 }
3556 
3557 static void tx_complete(struct urb *urb)
3558 {
3559 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3560 	struct skb_data *entry = (struct skb_data *)skb->cb;
3561 	struct lan78xx_net *dev = entry->dev;
3562 
3563 	if (urb->status == 0) {
3564 		dev->net->stats.tx_packets += entry->num_of_packet;
3565 		dev->net->stats.tx_bytes += entry->length;
3566 	} else {
3567 		dev->net->stats.tx_errors += entry->num_of_packet;
3568 
3569 		switch (urb->status) {
3570 		case -EPIPE:
3571 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3572 			break;
3573 
3574 		/* software-driven interface shutdown */
3575 		case -ECONNRESET:
3576 		case -ESHUTDOWN:
3577 			netif_dbg(dev, tx_err, dev->net,
3578 				  "tx err interface gone %d\n",
3579 				  entry->urb->status);
3580 			break;
3581 
3582 		case -EPROTO:
3583 		case -ETIME:
3584 		case -EILSEQ:
3585 			netif_stop_queue(dev->net);
3586 			netif_dbg(dev, tx_err, dev->net,
3587 				  "tx err queue stopped %d\n",
3588 				  entry->urb->status);
3589 			break;
3590 		default:
3591 			netif_dbg(dev, tx_err, dev->net,
3592 				  "unknown tx err %d\n",
3593 				  entry->urb->status);
3594 			break;
3595 		}
3596 	}
3597 
3598 	usb_autopm_put_interface_async(dev->intf);
3599 
3600 	skb_unlink(skb, &dev->txq);
3601 
3602 	lan78xx_release_tx_buf(dev, skb);
3603 
3604 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3605 	 */
3606 	if (skb_queue_empty(&dev->txq) &&
3607 	    !skb_queue_empty(&dev->txq_pend))
3608 		napi_schedule(&dev->napi);
3609 }
3610 
3611 static void lan78xx_queue_skb(struct sk_buff_head *list,
3612 			      struct sk_buff *newsk, enum skb_state state)
3613 {
3614 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3615 
3616 	__skb_queue_tail(list, newsk);
3617 	entry->state = state;
3618 }
3619 
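/* Free Tx buffer capacity in bytes: one full URB payload per free URB */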
3620 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3621 {
3622 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3623 }
3624 
3625 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3626 {
3627 	return dev->tx_pend_data_len;
3628 }
3629 
3630 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3631 				    struct sk_buff *skb,
3632 				    unsigned int *tx_pend_data_len)
3633 {
3634 	unsigned long flags;
3635 
3636 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3637 
3638 	__skb_queue_tail(&dev->txq_pend, skb);
3639 
3640 	dev->tx_pend_data_len += skb->len;
3641 	*tx_pend_data_len = dev->tx_pend_data_len;
3642 
3643 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3644 }
3645 
3646 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3647 					 struct sk_buff *skb,
3648 					 unsigned int *tx_pend_data_len)
3649 {
3650 	unsigned long flags;
3651 
3652 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3653 
3654 	__skb_queue_head(&dev->txq_pend, skb);
3655 
3656 	dev->tx_pend_data_len += skb->len;
3657 	*tx_pend_data_len = dev->tx_pend_data_len;
3658 
3659 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3660 }
3661 
3662 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3663 				    struct sk_buff **skb,
3664 				    unsigned int *tx_pend_data_len)
3665 {
3666 	unsigned long flags;
3667 
3668 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3669 
3670 	*skb = __skb_dequeue(&dev->txq_pend);
3671 	if (*skb)
3672 		dev->tx_pend_data_len -= (*skb)->len;
3673 	*tx_pend_data_len = dev->tx_pend_data_len;
3674 
3675 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3676 }
3677 
3678 static netdev_tx_t
3679 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3680 {
3681 	struct lan78xx_net *dev = netdev_priv(net);
3682 	unsigned int tx_pend_data_len;
3683 
3684 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3685 		schedule_delayed_work(&dev->wq, 0);
3686 
3687 	skb_tx_timestamp(skb);
3688 
3689 	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3690 
3691 	/* Set up a Tx URB if none is in progress */
3692 
3693 	if (skb_queue_empty(&dev->txq))
3694 		napi_schedule(&dev->napi);
3695 
3696 	/* Stop stack Tx queue if we have enough data to fill
3697 	 * all the free Tx URBs.
3698 	 */
3699 	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3700 		netif_stop_queue(net);
3701 
3702 		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3703 			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3704 
3705 		/* Kick off transmission of pending data */
3706 
3707 		if (!skb_queue_empty(&dev->txq_free))
3708 			napi_schedule(&dev->napi);
3709 	}
3710 
3711 	return NETDEV_TX_OK;
3712 }
3713 
3714 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3715 {
3716 	struct lan78xx_priv *pdata = NULL;
3717 	int ret;
3718 	int i;
3719 
3720 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3721 
3722 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3723 	if (!pdata) {
3724 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3725 		return -ENOMEM;
3726 	}
3727 
3728 	pdata->dev = dev;
3729 
3730 	spin_lock_init(&pdata->rfe_ctl_lock);
3731 	mutex_init(&pdata->dataport_mutex);
3732 
3733 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3734 
3735 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3736 		pdata->vlan_table[i] = 0;
3737 
3738 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3739 
3740 	dev->net->features = 0;
3741 
3742 	if (DEFAULT_TX_CSUM_ENABLE)
3743 		dev->net->features |= NETIF_F_HW_CSUM;
3744 
3745 	if (DEFAULT_RX_CSUM_ENABLE)
3746 		dev->net->features |= NETIF_F_RXCSUM;
3747 
3748 	if (DEFAULT_TSO_CSUM_ENABLE)
3749 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3750 
3751 	if (DEFAULT_VLAN_RX_OFFLOAD)
3752 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3753 
3754 	if (DEFAULT_VLAN_FILTER_ENABLE)
3755 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3756 
3757 	dev->net->hw_features = dev->net->features;
3758 
3759 	ret = lan78xx_setup_irq_domain(dev);
3760 	if (ret < 0) {
3761 		netdev_warn(dev->net,
3762 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3763 		goto out1;
3764 	}
3765 
3766 	/* Init all registers */
3767 	ret = lan78xx_reset(dev);
3768 	if (ret) {
3769 		netdev_warn(dev->net, "Registers INIT FAILED....");
3770 		goto out2;
3771 	}
3772 
3773 	ret = lan78xx_mdio_init(dev);
3774 	if (ret) {
3775 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3776 		goto out2;
3777 	}
3778 
3779 	dev->net->flags |= IFF_MULTICAST;
3780 
3781 	pdata->wol = WAKE_MAGIC;
3782 
3783 	return ret;
3784 
3785 out2:
3786 	lan78xx_remove_irq_domain(dev);
3787 
3788 out1:
3789 	netdev_warn(dev->net, "Bind routine FAILED");
3790 	cancel_work_sync(&pdata->set_multicast);
3791 	cancel_work_sync(&pdata->set_vlan);
3792 	kfree(pdata);
3793 	return ret;
3794 }
3795 
3796 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3797 {
3798 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3799 
3800 	lan78xx_remove_irq_domain(dev);
3801 
3802 	lan78xx_remove_mdio(dev);
3803 
3804 	if (pdata) {
3805 		cancel_work_sync(&pdata->set_multicast);
3806 		cancel_work_sync(&pdata->set_vlan);
3807 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3808 		kfree(pdata);
3809 		pdata = NULL;
3810 		dev->data[0] = 0;
3811 	}
3812 }
3813 
3814 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3815 				    struct sk_buff *skb,
3816 				    u32 rx_cmd_a, u32 rx_cmd_b)
3817 {
3818 	/* HW Checksum offload appears to be flawed if used when not stripping
3819 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3820 	 */
3821 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3822 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3823 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3824 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3825 		skb->ip_summed = CHECKSUM_NONE;
3826 	} else {
3827 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3828 		skb->ip_summed = CHECKSUM_COMPLETE;
3829 	}
3830 }
3831 
3832 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3833 				    struct sk_buff *skb,
3834 				    u32 rx_cmd_a, u32 rx_cmd_b)
3835 {
3836 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3837 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3838 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3839 				       (rx_cmd_b & 0xffff));
3840 }
3841 
3842 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3843 {
3844 	dev->net->stats.rx_packets++;
3845 	dev->net->stats.rx_bytes += skb->len;
3846 
3847 	skb->protocol = eth_type_trans(skb, dev->net);
3848 
3849 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3850 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3851 	memset(skb->cb, 0, sizeof(struct skb_data));
3852 
3853 	if (skb_defer_rx_timestamp(skb))
3854 		return;
3855 
3856 	napi_gro_receive(&dev->napi, skb);
3857 }
3858 
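/* Editor's note: each frame in a bulk-in URB buffer is preceded by a
 * 10-byte header: rx_cmd_a (le32), rx_cmd_b (le32) and rx_cmd_c (le16).
 * Frames are padded so that, after allowing for the RXW_PADDING offset,
 * the next header starts on a 4-byte boundary. This layout is inferred
 * from the parsing code below.
 */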
3859 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3860 		      int budget, int *work_done)
3861 {
3862 	if (skb->len < RX_SKB_MIN_LEN)
3863 		return 0;
3864 
3865 	/* Extract frames from the URB buffer and pass each one to
3866 	 * the stack in a new NAPI SKB.
3867 	 */
3868 	while (skb->len > 0) {
3869 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3870 		u16 rx_cmd_c;
3871 		unsigned char *packet;
3872 
3873 		rx_cmd_a = get_unaligned_le32(skb->data);
3874 		skb_pull(skb, sizeof(rx_cmd_a));
3875 
3876 		rx_cmd_b = get_unaligned_le32(skb->data);
3877 		skb_pull(skb, sizeof(rx_cmd_b));
3878 
3879 		rx_cmd_c = get_unaligned_le16(skb->data);
3880 		skb_pull(skb, sizeof(rx_cmd_c));
3881 
3882 		packet = skb->data;
3883 
3884 		/* get the packet length */
3885 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
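		/* Editor's note, worked example: size == 60 gives
		 * align_count = (4 - ((60 + 2) % 4)) % 4 = 2 padding bytes
		 * between this frame and the next header.
		 */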
3886 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3887 
3888 		if (unlikely(size > skb->len)) {
3889 			netif_dbg(dev, rx_err, dev->net,
3890 				  "size err rx_cmd_a=0x%08x\n",
3891 				  rx_cmd_a);
3892 			return 0;
3893 		}
3894 
3895 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3896 			netif_dbg(dev, rx_err, dev->net,
3897 				  "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
3898 		} else {
3899 			u32 frame_len;
3900 			struct sk_buff *skb2;
3901 
3902 			if (unlikely(size < ETH_FCS_LEN)) {
3903 				netif_dbg(dev, rx_err, dev->net,
3904 					  "size err rx_cmd_a=0x%08x\n",
3905 					  rx_cmd_a);
3906 				return 0;
3907 			}
3908 
3909 			frame_len = size - ETH_FCS_LEN;
3910 
3911 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3912 			if (!skb2)
3913 				return 0;
3914 
3915 			memcpy(skb2->data, packet, frame_len);
3916 
3917 			skb_put(skb2, frame_len);
3918 
3919 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3920 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3921 
3922 			/* Processing of the URB buffer must complete once
3923 			 * it has started. If the NAPI work budget is exhausted
3924 			 * while frames remain, they are added to the overflow
3925 			 * queue for delivery in the next NAPI polling cycle.
3926 			 */
3927 			if (*work_done < budget) {
3928 				lan78xx_skb_return(dev, skb2);
3929 				++(*work_done);
3930 			} else {
3931 				skb_queue_tail(&dev->rxq_overflow, skb2);
3932 			}
3933 		}
3934 
3935 		skb_pull(skb, size);
3936 
3937 		/* skip padding bytes before the next frame starts */
3938 		if (skb->len)
3939 			skb_pull(skb, align_count);
3940 	}
3941 
3942 	return 1;
3943 }
3944 
3945 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3946 			      int budget, int *work_done)
3947 {
3948 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3949 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3950 		dev->net->stats.rx_errors++;
3951 	}
3952 }
3953 
3954 static void rx_complete(struct urb *urb)
3955 {
3956 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3957 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3958 	struct lan78xx_net *dev = entry->dev;
3959 	int urb_status = urb->status;
3960 	enum skb_state state;
3961 
3962 	netif_dbg(dev, rx_status, dev->net,
3963 		  "rx done: status %d\n", urb_status);
3964 
3965 	skb_put(skb, urb->actual_length);
3966 	state = rx_done;
3967 
3968 	if (urb != entry->urb)
3969 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch\n");
3970 
3971 	switch (urb_status) {
3972 	case 0:
3973 		if (skb->len < RX_SKB_MIN_LEN) {
3974 			state = rx_cleanup;
3975 			dev->net->stats.rx_errors++;
3976 			dev->net->stats.rx_length_errors++;
3977 			netif_dbg(dev, rx_err, dev->net,
3978 				  "rx length %d\n", skb->len);
3979 		}
3980 		usb_mark_last_busy(dev->udev);
3981 		break;
3982 	case -EPIPE:
3983 		dev->net->stats.rx_errors++;
3984 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3985 		fallthrough;
3986 	case -ECONNRESET:				/* async unlink */
3987 	case -ESHUTDOWN:				/* hardware gone */
3988 		netif_dbg(dev, ifdown, dev->net,
3989 			  "rx shutdown, code %d\n", urb_status);
3990 		state = rx_cleanup;
3991 		break;
3992 	case -EPROTO:
3993 	case -ETIME:
3994 	case -EILSEQ:
3995 		dev->net->stats.rx_errors++;
3996 		state = rx_cleanup;
3997 		break;
3998 
3999 	/* data overrun ... flush fifo? */
4000 	case -EOVERFLOW:
4001 		dev->net->stats.rx_over_errors++;
4002 		fallthrough;
4003 
4004 	default:
4005 		state = rx_cleanup;
4006 		dev->net->stats.rx_errors++;
4007 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
4008 		break;
4009 	}
4010 
4011 	state = defer_bh(dev, skb, &dev->rxq, state);
4012 }
4013 
4014 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
4015 {
4016 	struct skb_data	*entry = (struct skb_data *)skb->cb;
4017 	size_t size = dev->rx_urb_size;
4018 	struct urb *urb = entry->urb;
4019 	unsigned long lockflags;
4020 	int ret = 0;
4021 
4022 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
4023 			  skb->data, size, rx_complete, skb);
4024 
4025 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
4026 
4027 	if (netif_device_present(dev->net) &&
4028 	    netif_running(dev->net) &&
4029 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
4030 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4031 		ret = usb_submit_urb(urb, flags);
4032 		switch (ret) {
4033 		case 0:
4034 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
4035 			break;
4036 		case -EPIPE:
4037 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
4038 			break;
4039 		case -ENODEV:
4040 		case -ENOENT:
4041 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
4042 			netif_device_detach(dev->net);
4043 			break;
4044 		case -EHOSTUNREACH:
4045 			ret = -ENOLINK;
4046 			napi_schedule(&dev->napi);
4047 			break;
4048 		default:
4049 			netif_dbg(dev, rx_err, dev->net,
4050 				  "rx submit, %d\n", ret);
4051 			napi_schedule(&dev->napi);
4052 			break;
4053 		}
4054 	} else {
4055 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
4056 		ret = -ENOLINK;
4057 	}
4058 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
4059 
4060 	if (ret)
4061 		lan78xx_release_rx_buf(dev, skb);
4062 
4063 	return ret;
4064 }
4065 
4066 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4067 {
4068 	struct sk_buff *rx_buf;
4069 
4070 	/* Ensure the maximum number of Rx URBs is submitted
4071 	 */
4072 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4073 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4074 			break;
4075 	}
4076 }
4077 
4078 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
4079 				    struct sk_buff *rx_buf)
4080 {
4081 	/* reset SKB data pointers */
4082 
4083 	rx_buf->data = rx_buf->head;
4084 	skb_reset_tail_pointer(rx_buf);
4085 	rx_buf->len = 0;
4086 	rx_buf->data_len = 0;
4087 
4088 	rx_submit(dev, rx_buf, GFP_ATOMIC);
4089 }
4090 
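/* Editor's note: the two command words are written little-endian in
 * front of each packet in the Tx URB buffer: tx_cmd_a holds the frame
 * length plus the FCS, checksum-offload and LSO flags; tx_cmd_b holds
 * the TSO MSS and any VLAN tag to insert. Inferred from the code below.
 */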
4091 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
4092 {
4093 	u32 tx_cmd_a;
4094 	u32 tx_cmd_b;
4095 
4096 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
4097 
4098 	if (skb->ip_summed == CHECKSUM_PARTIAL)
4099 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
4100 
4101 	tx_cmd_b = 0;
4102 	if (skb_is_gso(skb)) {
4103 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
4104 
4105 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
4106 
4107 		tx_cmd_a |= TX_CMD_A_LSO_;
4108 	}
4109 
4110 	if (skb_vlan_tag_present(skb)) {
4111 		tx_cmd_a |= TX_CMD_A_IVTG_;
4112 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
4113 	}
4114 
4115 	put_unaligned_le32(tx_cmd_a, buffer);
4116 	put_unaligned_le32(tx_cmd_b, buffer + 4);
4117 }
4118 
4119 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
4120 					    struct sk_buff *tx_buf)
4121 {
4122 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
4123 	int remain = dev->tx_urb_size;
4124 	u8 *tx_data = tx_buf->data;
4125 	u32 urb_len = 0;
4126 
4127 	entry->num_of_packet = 0;
4128 	entry->length = 0;
4129 
4130 	/* Work through the pending SKBs and copy the data of each SKB into
4131 	 * the URB buffer if there is room for all the SKB data.
4132 	 *
4133 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
4134 	 */
4135 	while (remain >= TX_SKB_MIN_LEN) {
4136 		unsigned int pending_bytes;
4137 		unsigned int align_bytes;
4138 		struct sk_buff *skb;
4139 		unsigned int len;
4140 
4141 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
4142 
4143 		if (!skb)
4144 			break;
4145 
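		/* Editor's note: each command-word header must start on a
		 * TX_ALIGNMENT (4-byte) boundary within the URB buffer,
		 * e.g. urb_len == 10 requires align_bytes == 2.
		 */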
4146 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
4147 			      TX_ALIGNMENT;
4148 		len = align_bytes + TX_CMD_LEN + skb->len;
4149 		if (len > remain) {
4150 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
4151 			break;
4152 		}
4153 
4154 		tx_data += align_bytes;
4155 
4156 		lan78xx_fill_tx_cmd_words(skb, tx_data);
4157 		tx_data += TX_CMD_LEN;
4158 
4159 		len = skb->len;
4160 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
4161 			struct net_device_stats *stats = &dev->net->stats;
4162 
4163 			stats->tx_dropped++;
4164 			dev_kfree_skb_any(skb);
4165 			tx_data -= TX_CMD_LEN;
4166 			continue;
4167 		}
4168 
4169 		tx_data += len;
4170 		entry->length += len;
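		/* count every GSO segment, or one packet for a non-GSO skb */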
4171 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
4172 
4173 		dev_kfree_skb_any(skb);
4174 
4175 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
4176 
4177 		remain = dev->tx_urb_size - urb_len;
4178 	}
4179 
4180 	skb_put(tx_buf, urb_len);
4181 
4182 	return entry;
4183 }
4184 
4185 static void lan78xx_tx_bh(struct lan78xx_net *dev)
4186 {
4187 	int ret;
4188 
4189 	/* Start the stack Tx queue if it was stopped
4190 	 */
4191 	netif_tx_lock(dev->net);
4192 	if (netif_queue_stopped(dev->net)) {
4193 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
4194 			netif_wake_queue(dev->net);
4195 	}
4196 	netif_tx_unlock(dev->net);
4197 
4198 	/* Go through the Tx pending queue and set up URBs to transfer
4199 	 * the data to the device. Stop if no more pending data or URBs,
4200 	 * or if an error occurs when a URB is submitted.
4201 	 */
4202 	do {
4203 		struct skb_data *entry;
4204 		struct sk_buff *tx_buf;
4205 		unsigned long flags;
4206 
4207 		if (skb_queue_empty(&dev->txq_pend))
4208 			break;
4209 
4210 		tx_buf = lan78xx_get_tx_buf(dev);
4211 		if (!tx_buf)
4212 			break;
4213 
4214 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
4215 
4216 		spin_lock_irqsave(&dev->txq.lock, flags);
4217 		ret = usb_autopm_get_interface_async(dev->intf);
4218 		if (ret < 0) {
4219 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4220 			goto out;
4221 		}
4222 
4223 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
4224 				  tx_buf->data, tx_buf->len, tx_complete,
4225 				  tx_buf);
4226 
4227 		if (tx_buf->len % dev->maxpacket == 0) {
4228 			/* terminate the transfer with a zero-length packet */
4229 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
4230 		}
4231 
4232 #ifdef CONFIG_PM
4233 		/* if device is asleep stop outgoing packet processing */
4234 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4235 			usb_anchor_urb(entry->urb, &dev->deferred);
4236 			netif_stop_queue(dev->net);
4237 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4238 			netdev_dbg(dev->net,
4239 				   "Delaying transmission for resumption\n");
4240 			return;
4241 		}
4242 #endif
4243 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
4244 		switch (ret) {
4245 		case 0:
4246 			netif_trans_update(dev->net);
4247 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
4248 			break;
4249 		case -EPIPE:
4250 			netif_stop_queue(dev->net);
4251 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4252 			usb_autopm_put_interface_async(dev->intf);
4253 			break;
4254 		case -ENODEV:
4255 		case -ENOENT:
4256 			netif_dbg(dev, tx_err, dev->net,
4257 				  "tx submit urb err %d (disconnected?)\n", ret);
4258 			netif_device_detach(dev->net);
4259 			break;
4260 		default:
4261 			usb_autopm_put_interface_async(dev->intf);
4262 			netif_dbg(dev, tx_err, dev->net,
4263 				  "tx submit urb err %d\n", ret);
4264 			break;
4265 		}
4266 
4267 		spin_unlock_irqrestore(&dev->txq.lock, flags);
4268 
4269 		if (ret) {
4270 			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
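		/* NB (editor's note): the out: label sits inside this error
		 * branch so the usb_autopm_get_interface_async() failure
		 * path above can share the cleanup that follows.
		 */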
4271 out:
4272 			dev->net->stats.tx_dropped += entry->num_of_packet;
4273 			lan78xx_release_tx_buf(dev, tx_buf);
4274 		}
4275 	} while (ret == 0);
4276 }
4277 
4278 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
4279 {
4280 	struct sk_buff_head done;
4281 	struct sk_buff *rx_buf;
4282 	struct skb_data *entry;
4283 	unsigned long flags;
4284 	int work_done = 0;
4285 
4286 	/* Pass frames received in the last NAPI cycle before
4287 	 * working on newly completed URBs.
4288 	 */
4289 	while (!skb_queue_empty(&dev->rxq_overflow)) {
4290 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
4291 		++work_done;
4292 	}
4293 
4294 	/* Take a snapshot of the done queue and move items to a
4295 	 * temporary queue. Rx URB completions will continue to add
4296 	 * to the done queue.
4297 	 */
4298 	__skb_queue_head_init(&done);
4299 
4300 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4301 	skb_queue_splice_init(&dev->rxq_done, &done);
4302 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4303 
4304 	/* Extract receive frames from completed URBs and
4305 	 * pass them to the stack. Re-submit each completed URB.
4306 	 */
4307 	while ((work_done < budget) &&
4308 	       (rx_buf = __skb_dequeue(&done))) {
4309 		entry = (struct skb_data *)(rx_buf->cb);
4310 		switch (entry->state) {
4311 		case rx_done:
4312 			rx_process(dev, rx_buf, budget, &work_done);
4313 			break;
4314 		case rx_cleanup:
4315 			break;
4316 		default:
4317 			netdev_dbg(dev->net, "rx buf state %d\n",
4318 				   entry->state);
4319 			break;
4320 		}
4321 
4322 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4323 	}
4324 
4325 	/* If budget was consumed before processing all the URBs put them
4326 	 * back on the front of the done queue. They will be first to be
4327 	 * processed in the next NAPI cycle.
4328 	 */
4329 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4330 	skb_queue_splice(&done, &dev->rxq_done);
4331 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4332 
4333 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4334 		/* reset update timer delta */
4335 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4336 			dev->delta = 1;
4337 			mod_timer(&dev->stat_monitor,
4338 				  jiffies + STAT_UPDATE_TIMER);
4339 		}
4340 
4341 		/* Submit all free Rx URBs */
4342 
4343 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4344 			lan78xx_rx_urb_submit_all(dev);
4345 
4346 		/* Submit new Tx URBs */
4347 
4348 		lan78xx_tx_bh(dev);
4349 	}
4350 
4351 	return work_done;
4352 }
4353 
4354 static int lan78xx_poll(struct napi_struct *napi, int budget)
4355 {
4356 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4357 	int result = budget;
4358 	int work_done;
4359 
4360 	/* Don't do any work if the device is suspended */
4361 
4362 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4363 		napi_complete_done(napi, 0);
4364 		return 0;
4365 	}
4366 
4367 	/* Process completed URBs and submit new URBs */
4368 
4369 	work_done = lan78xx_bh(dev, budget);
4370 
4371 	if (work_done < budget) {
4372 		napi_complete_done(napi, work_done);
4373 
4374 		/* Start a new polling cycle if data was received or
4375 		 * data is waiting to be transmitted.
4376 		 */
4377 		if (!skb_queue_empty(&dev->rxq_done)) {
4378 			napi_schedule(napi);
4379 		} else if (netif_carrier_ok(dev->net)) {
4380 			if (skb_queue_empty(&dev->txq) &&
4381 			    !skb_queue_empty(&dev->txq_pend)) {
4382 				napi_schedule(napi);
4383 			} else {
4384 				netif_tx_lock(dev->net);
4385 				if (netif_queue_stopped(dev->net)) {
4386 					netif_wake_queue(dev->net);
4387 					napi_schedule(napi);
4388 				}
4389 				netif_tx_unlock(dev->net);
4390 			}
4391 		}
4392 		result = work_done;
4393 	}
4394 
4395 	return result;
4396 }
4397 
4398 static void lan78xx_delayedwork(struct work_struct *work)
4399 {
4400 	int status;
4401 	struct lan78xx_net *dev;
4402 
4403 	dev = container_of(work, struct lan78xx_net, wq.work);
4404 
4405 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4406 		return;
4407 
4408 	if (usb_autopm_get_interface(dev->intf) < 0)
4409 		return;
4410 
4411 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4412 		unlink_urbs(dev, &dev->txq);
4413 
4414 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4415 		if (status < 0 &&
4416 		    status != -EPIPE &&
4417 		    status != -ESHUTDOWN) {
4418 			if (netif_msg_tx_err(dev))
4419 				netdev_err(dev->net,
4420 					   "can't clear tx halt, status %d\n",
4421 					   status);
4422 		} else {
4423 			clear_bit(EVENT_TX_HALT, &dev->flags);
4424 			if (status != -ESHUTDOWN)
4425 				netif_wake_queue(dev->net);
4426 		}
4427 	}
4428 
4429 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4430 		unlink_urbs(dev, &dev->rxq);
4431 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4432 		if (status < 0 &&
4433 		    status != -EPIPE &&
4434 		    status != -ESHUTDOWN) {
4435 			if (netif_msg_rx_err(dev))
4436 				netdev_err(dev->net,
4437 					   "can't clear rx halt, status %d\n",
4438 					   status);
4439 		} else {
4440 			clear_bit(EVENT_RX_HALT, &dev->flags);
4441 			napi_schedule(&dev->napi);
4442 		}
4443 	}
4444 
4445 	if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) {
4446 		int ret = 0;
4447 
4448 		clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
4449 		ret = lan78xx_phy_int_ack(dev);
4450 		if (ret)
4451 			netdev_info(dev->net, "PHY INT ack failed (%pe)\n",
4452 				    ERR_PTR(ret));
4453 	}
4454 
4455 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4456 		lan78xx_update_stats(dev);
4457 
4458 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4459 
4460 		mod_timer(&dev->stat_monitor,
4461 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4462 
4463 		dev->delta = min((dev->delta * 2), 50);
4464 	}
4465 
4466 	usb_autopm_put_interface(dev->intf);
4467 }
4468 
4469 static void intr_complete(struct urb *urb)
4470 {
4471 	struct lan78xx_net *dev = urb->context;
4472 	int status = urb->status;
4473 
4474 	switch (status) {
4475 	/* success */
4476 	case 0:
4477 		lan78xx_status(dev, urb);
4478 		break;
4479 
4480 	/* software-driven interface shutdown */
4481 	case -ENOENT:			/* urb killed */
4482 	case -ENODEV:			/* hardware gone */
4483 	case -ESHUTDOWN:		/* hardware gone */
4484 		netif_dbg(dev, ifdown, dev->net,
4485 			  "intr shutdown, code %d\n", status);
4486 		return;
4487 
4488 	/* NOTE:  not throttling like RX/TX, since this endpoint
4489 	 * already polls infrequently
4490 	 */
4491 	default:
4492 		netdev_dbg(dev->net, "intr status %d\n", status);
4493 		break;
4494 	}
4495 
4496 	if (!netif_device_present(dev->net) ||
4497 	    !netif_running(dev->net)) {
4498 		netdev_warn(dev->net, "not submitting new status URB\n");
4499 		return;
4500 	}
4501 
4502 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4503 	status = usb_submit_urb(urb, GFP_ATOMIC);
4504 
4505 	switch (status) {
4506 	case  0:
4507 		break;
4508 	case -ENODEV:
4509 	case -ENOENT:
4510 		netif_dbg(dev, timer, dev->net,
4511 			  "intr resubmit %d (disconnect?)\n", status);
4512 		netif_device_detach(dev->net);
4513 		break;
4514 	default:
4515 		netif_err(dev, timer, dev->net,
4516 			  "intr resubmit --> %d\n", status);
4517 		break;
4518 	}
4519 }
4520 
4521 static void lan78xx_disconnect(struct usb_interface *intf)
4522 {
4523 	struct lan78xx_net *dev;
4524 	struct usb_device *udev;
4525 	struct net_device *net;
4526 
4527 	dev = usb_get_intfdata(intf);
4528 	usb_set_intfdata(intf, NULL);
4529 	if (!dev)
4530 		return;
4531 
4532 	udev = interface_to_usbdev(intf);
4533 	net = dev->net;
4534 
4535 	rtnl_lock();
4536 	phylink_stop(dev->phylink);
4537 	phylink_disconnect_phy(dev->phylink);
4538 	rtnl_unlock();
4539 
4540 	netif_napi_del(&dev->napi);
4541 
4542 	unregister_netdev(net);
4543 
4544 	timer_shutdown_sync(&dev->stat_monitor);
4545 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4546 	cancel_delayed_work_sync(&dev->wq);
4547 
4548 	phylink_destroy(dev->phylink);
4549 
4550 	usb_scuttle_anchored_urbs(&dev->deferred);
4551 
4552 	lan78xx_unbind(dev, intf);
4553 
4554 	lan78xx_free_tx_resources(dev);
4555 	lan78xx_free_rx_resources(dev);
4556 
4557 	usb_kill_urb(dev->urb_intr);
4558 	usb_free_urb(dev->urb_intr);
4559 
4560 	free_netdev(net);
4561 	usb_put_dev(udev);
4562 }
4563 
4564 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4565 {
4566 	struct lan78xx_net *dev = netdev_priv(net);
4567 
4568 	unlink_urbs(dev, &dev->txq);
4569 	napi_schedule(&dev->napi);
4570 }
4571 
4572 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4573 						struct net_device *netdev,
4574 						netdev_features_t features)
4575 {
4576 	struct lan78xx_net *dev = netdev_priv(netdev);
4577 
4578 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4579 		features &= ~NETIF_F_GSO_MASK;
4580 
4581 	features = vlan_features_check(skb, features);
4582 	features = vxlan_features_check(skb, features);
4583 
4584 	return features;
4585 }
4586 
4587 static const struct net_device_ops lan78xx_netdev_ops = {
4588 	.ndo_open		= lan78xx_open,
4589 	.ndo_stop		= lan78xx_stop,
4590 	.ndo_start_xmit		= lan78xx_start_xmit,
4591 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4592 	.ndo_change_mtu		= lan78xx_change_mtu,
4593 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4594 	.ndo_validate_addr	= eth_validate_addr,
4595 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4596 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4597 	.ndo_set_features	= lan78xx_set_features,
4598 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4599 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4600 	.ndo_features_check	= lan78xx_features_check,
4601 };
4602 
4603 static void lan78xx_stat_monitor(struct timer_list *t)
4604 {
4605 	struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);
4606 
4607 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4608 }
4609 
4610 static int lan78xx_probe(struct usb_interface *intf,
4611 			 const struct usb_device_id *id)
4612 {
4613 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4614 	struct lan78xx_net *dev;
4615 	struct net_device *netdev;
4616 	struct usb_device *udev;
4617 	int ret;
4618 	unsigned int maxp;
4619 	unsigned int period;
4620 	u8 *buf = NULL;
4621 
4622 	udev = interface_to_usbdev(intf);
4623 	udev = usb_get_dev(udev);
4624 
4625 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4626 	if (!netdev) {
4627 		dev_err(&intf->dev, "Error: OOM\n");
4628 		ret = -ENOMEM;
4629 		goto out1;
4630 	}
4631 
4632 	SET_NETDEV_DEV(netdev, &intf->dev);
4633 
4634 	dev = netdev_priv(netdev);
4635 	dev->udev = udev;
4636 	dev->intf = intf;
4637 	dev->net = netdev;
4638 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4639 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4640 
4641 	skb_queue_head_init(&dev->rxq);
4642 	skb_queue_head_init(&dev->txq);
4643 	skb_queue_head_init(&dev->rxq_done);
4644 	skb_queue_head_init(&dev->txq_pend);
4645 	skb_queue_head_init(&dev->rxq_overflow);
4646 	mutex_init(&dev->mdiobus_mutex);
4647 	mutex_init(&dev->dev_mutex);
4648 
4649 	ret = lan78xx_urb_config_init(dev);
4650 	if (ret < 0)
4651 		goto out2;
4652 
4653 	ret = lan78xx_alloc_tx_resources(dev);
4654 	if (ret < 0)
4655 		goto out2;
4656 
4657 	ret = lan78xx_alloc_rx_resources(dev);
4658 	if (ret < 0)
4659 		goto out3;
4660 
4661 	/* MTU range: 68 - 9000 */
4662 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4663 
4664 	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4665 
4666 	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4667 
4668 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4669 	init_usb_anchor(&dev->deferred);
4670 
4671 	netdev->netdev_ops = &lan78xx_netdev_ops;
4672 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4673 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4674 
4675 	dev->delta = 1;
4676 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4677 
4678 	mutex_init(&dev->stats.access_lock);
4679 
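	/* Editor's note: the interface is expected to expose three
	 * endpoints: a bulk-in and a bulk-out data pipe plus an
	 * interrupt-in endpoint (index 2) used for link status events.
	 */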
4680 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4681 		ret = -ENODEV;
4682 		goto out4;
4683 	}
4684 
4685 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4686 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4687 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4688 		ret = -ENODEV;
4689 		goto out4;
4690 	}
4691 
4692 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4693 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4694 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4695 		ret = -ENODEV;
4696 		goto out4;
4697 	}
4698 
4699 	ep_intr = &intf->cur_altsetting->endpoint[2];
4700 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4701 		ret = -ENODEV;
4702 		goto out4;
4703 	}
4704 
4705 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4706 					usb_endpoint_num(&ep_intr->desc));
4707 
4708 	ret = lan78xx_bind(dev, intf);
4709 	if (ret < 0)
4710 		goto out4;
4711 
4712 	period = ep_intr->desc.bInterval;
4713 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4714 
4715 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4716 	if (!dev->urb_intr) {
4717 		ret = -ENOMEM;
4718 		goto out5;
4719 	}
4720 
4721 	buf = kmalloc(maxp, GFP_KERNEL);
4722 	if (!buf) {
4723 		ret = -ENOMEM;
4724 		goto free_urbs;
4725 	}
4726 
4727 	usb_fill_int_urb(dev->urb_intr, dev->udev,
4728 			 dev->pipe_intr, buf, maxp,
4729 			 intr_complete, dev, period);
4730 	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4731 
4732 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4733 
4734 	/* Reject broken descriptors. */
4735 	if (dev->maxpacket == 0) {
4736 		ret = -ENODEV;
4737 		goto free_urbs;
4738 	}
4739 
4740 	/* driver requires remote-wakeup capability during autosuspend. */
4741 	intf->needs_remote_wakeup = 1;
4742 
4743 	ret = lan78xx_phy_init(dev);
4744 	if (ret < 0)
4745 		goto free_urbs;
4746 
4747 	ret = register_netdev(netdev);
4748 	if (ret != 0) {
4749 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4750 		goto phy_uninit;
4751 	}
4752 
4753 	usb_set_intfdata(intf, dev);
4754 
4755 	ret = device_set_wakeup_enable(&udev->dev, true);
4756 
4757 	/* The default autosuspend delay of 2 s has more overhead than
4758 	 * benefit; use 10 s (DEFAULT_AUTOSUSPEND_DELAY) instead.
4759 	 */
4760 	pm_runtime_set_autosuspend_delay(&udev->dev,
4761 					 DEFAULT_AUTOSUSPEND_DELAY);
4762 
4763 	return 0;
4764 
4765 phy_uninit:
4766 	lan78xx_phy_uninit(dev);
4767 free_urbs:
4768 	usb_free_urb(dev->urb_intr);
4769 out5:
4770 	lan78xx_unbind(dev, intf);
4771 out4:
4772 	netif_napi_del(&dev->napi);
4773 	lan78xx_free_rx_resources(dev);
4774 out3:
4775 	lan78xx_free_tx_resources(dev);
4776 out2:
4777 	free_netdev(netdev);
4778 out1:
4779 	usb_put_dev(udev);
4780 
4781 	return ret;
4782 }
4783 
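/* Editor's note: bit-serial CRC-16 over the selected frame bytes, data
 * consumed LSB first, polynomial 0x8005 (x^16 + x^15 + x^2 + 1), initial
 * value 0xFFFF; the result feeds the WUF_CFGX_CRC16_MASK_ field of the
 * wakeup-frame filters programmed below.
 */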
4784 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4785 {
4786 	const u16 crc16poly = 0x8005;
4787 	int i;
4788 	u16 bit, crc, msb;
4789 	u8 data;
4790 
4791 	crc = 0xFFFF;
4792 	for (i = 0; i < len; i++) {
4793 		data = *buf++;
4794 		for (bit = 0; bit < 8; bit++) {
4795 			msb = crc >> 15;
4796 			crc <<= 1;
4797 
4798 			if (msb ^ (u16)(data & 1)) {
4799 				crc ^= crc16poly;
4800 				crc |= (u16)0x0001U;
4801 			}
4802 			data >>= 1;
4803 		}
4804 	}
4805 
4806 	return crc;
4807 }
4808 
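/* Editor's note: for selective (auto) suspend the device is armed so
 * that any good frame passing the receive filter (WUCSR_RFE_WAKE_EN_ |
 * WUCSR_STORE_WAKE_) wakes it, suspend mode 3 is selected in PMT_CTL,
 * and the Rx path is restarted at the end. Inferred from the code below.
 */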
4809 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4810 {
4811 	u32 buf;
4812 	int ret;
4813 
4814 	ret = lan78xx_stop_tx_path(dev);
4815 	if (ret < 0)
4816 		return ret;
4817 
4818 	ret = lan78xx_stop_rx_path(dev);
4819 	if (ret < 0)
4820 		return ret;
4821 
4822 	/* auto suspend (selective suspend) */
4823 
4824 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4825 	if (ret < 0)
4826 		return ret;
4827 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4828 	if (ret < 0)
4829 		return ret;
4830 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4831 	if (ret < 0)
4832 		return ret;
4833 
4834 	/* set goodframe wakeup */
4835 
4836 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4837 	if (ret < 0)
4838 		return ret;
4839 
4840 	buf |= WUCSR_RFE_WAKE_EN_;
4841 	buf |= WUCSR_STORE_WAKE_;
4842 
4843 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4844 	if (ret < 0)
4845 		return ret;
4846 
4847 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4848 	if (ret < 0)
4849 		return ret;
4850 
4851 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4852 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4853 	buf |= PMT_CTL_PHY_WAKE_EN_;
4854 	buf |= PMT_CTL_WOL_EN_;
4855 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4856 	buf |= PMT_CTL_SUS_MODE_3_;
4857 
4858 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4859 	if (ret < 0)
4860 		return ret;
4861 
4862 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4863 	if (ret < 0)
4864 		return ret;
4865 
4866 	buf |= PMT_CTL_WUPS_MASK_;
4867 
4868 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4869 	if (ret < 0)
4870 		return ret;
4871 
4872 	ret = lan78xx_start_rx_path(dev);
4873 
4874 	return ret;
4875 }
4876 
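/* Editor's note: each WAKE_* flag maps onto WUCSR bits and, for
 * multicast and ARP wakeup, onto a wakeup-frame filter (WUF_CFG /
 * WUF_MASK) whose CRC16 must match the frame bytes selected by the
 * mask, e.g. the 0x0806 ethertype at offsets 12-13 for ARP. Inferred
 * from the code below.
 */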
4877 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4878 {
4879 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4880 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4881 	const u8 arp_type[2] = { 0x08, 0x06 };
4882 	u32 temp_pmt_ctl;
4883 	int mask_index;
4884 	u32 temp_wucsr;
4885 	u32 buf;
4886 	u16 crc;
4887 	int ret;
4888 
4889 	ret = lan78xx_stop_tx_path(dev);
4890 	if (ret < 0)
4891 		return ret;
4892 	ret = lan78xx_stop_rx_path(dev);
4893 	if (ret < 0)
4894 		return ret;
4895 
4896 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4897 	if (ret < 0)
4898 		return ret;
4899 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4900 	if (ret < 0)
4901 		return ret;
4902 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4903 	if (ret < 0)
4904 		return ret;
4905 
4906 	temp_wucsr = 0;
4907 
4908 	temp_pmt_ctl = 0;
4909 
4910 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4911 	if (ret < 0)
4912 		return ret;
4913 
4914 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4915 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4916 
4917 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4918 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4919 		if (ret < 0)
4920 			return ret;
4921 	}
4922 
4923 	mask_index = 0;
4924 	if (wol & WAKE_PHY) {
4925 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4926 
4927 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4928 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4929 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4930 	}
4931 	if (wol & WAKE_MAGIC) {
4932 		temp_wucsr |= WUCSR_MPEN_;
4933 
4934 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4935 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4936 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4937 	}
4938 	if (wol & WAKE_BCAST) {
4939 		temp_wucsr |= WUCSR_BCST_EN_;
4940 
4941 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4942 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4943 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4944 	}
4945 	if (wol & WAKE_MCAST) {
4946 		temp_wucsr |= WUCSR_WAKE_EN_;
4947 
4948 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4949 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4950 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4951 					WUF_CFGX_EN_ |
4952 					WUF_CFGX_TYPE_MCAST_ |
4953 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4954 					(crc & WUF_CFGX_CRC16_MASK_));
4955 		if (ret < 0)
4956 			return ret;
4957 
4958 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4959 		if (ret < 0)
4960 			return ret;
4961 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4962 		if (ret < 0)
4963 			return ret;
4964 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4965 		if (ret < 0)
4966 			return ret;
4967 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4968 		if (ret < 0)
4969 			return ret;
4970 
4971 		mask_index++;
4972 
4973 		/* for IPv6 Multicast */
4974 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4975 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4976 					WUF_CFGX_EN_ |
4977 					WUF_CFGX_TYPE_MCAST_ |
4978 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4979 					(crc & WUF_CFGX_CRC16_MASK_));
4980 		if (ret < 0)
4981 			return ret;
4982 
4983 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4984 		if (ret < 0)
4985 			return ret;
4986 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4987 		if (ret < 0)
4988 			return ret;
4989 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4990 		if (ret < 0)
4991 			return ret;
4992 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4993 		if (ret < 0)
4994 			return ret;
4995 
4996 		mask_index++;
4997 
4998 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4999 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5000 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5001 	}
5002 	if (wol & WAKE_UCAST) {
5003 		temp_wucsr |= WUCSR_PFDA_EN_;
5004 
5005 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5006 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5007 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5008 	}
5009 	if (wol & WAKE_ARP) {
5010 		temp_wucsr |= WUCSR_WAKE_EN_;
5011 
5012 		/* set WUF_CFG & WUF_MASK
5013 		 * for packettype (offset 12,13) = ARP (0x0806)
5014 		 */
5015 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
5016 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
5017 					WUF_CFGX_EN_ |
5018 					WUF_CFGX_TYPE_ALL_ |
5019 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
5020 					(crc & WUF_CFGX_CRC16_MASK_));
5021 		if (ret < 0)
5022 			return ret;
5023 
5024 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
5025 		if (ret < 0)
5026 			return ret;
5027 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
5028 		if (ret < 0)
5029 			return ret;
5030 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
5031 		if (ret < 0)
5032 			return ret;
5033 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
5034 		if (ret < 0)
5035 			return ret;
5036 
5037 		mask_index++;
5038 
5039 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5040 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5041 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5042 	}
5043 
5044 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
5045 	if (ret < 0)
5046 		return ret;
5047 
5048 	/* when multiple WOL bits are set */
5049 	if (hweight_long((unsigned long)wol) > 1) {
5050 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5051 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5052 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5053 	}
5054 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
5055 	if (ret < 0)
5056 		return ret;
5057 
5058 	/* clear WUPS */
5059 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5060 	if (ret < 0)
5061 		return ret;
5062 
5063 	buf |= PMT_CTL_WUPS_MASK_;
5064 
5065 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5066 	if (ret < 0)
5067 		return ret;
5068 
5069 	ret = lan78xx_start_rx_path(dev);
5070 
5071 	return ret;
5072 }
5073 
5074 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
5075 {
5076 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5077 	bool dev_open;
5078 	int ret;
5079 
5080 	mutex_lock(&dev->dev_mutex);
5081 
5082 	netif_dbg(dev, ifdown, dev->net,
5083 		  "suspending: pm event %#x\n", message.event);
5084 
5085 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5086 
5087 	if (dev_open) {
5088 		spin_lock_irq(&dev->txq.lock);
5089 		/* don't autosuspend while transmitting */
5090 		if ((skb_queue_len(&dev->txq) ||
5091 		     skb_queue_len(&dev->txq_pend)) &&
5092 		    PMSG_IS_AUTO(message)) {
5093 			spin_unlock_irq(&dev->txq.lock);
5094 			ret = -EBUSY;
5095 			goto out;
5096 		} else {
5097 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
5098 			spin_unlock_irq(&dev->txq.lock);
5099 		}
5100 
5101 		rtnl_lock();
5102 		phylink_suspend(dev->phylink, false);
5103 		rtnl_unlock();
5104 
5105 		/* stop RX */
5106 		ret = lan78xx_stop_rx_path(dev);
5107 		if (ret < 0)
5108 			goto out;
5109 
5110 		ret = lan78xx_flush_rx_fifo(dev);
5111 		if (ret < 0)
5112 			goto out;
5113 
5114 		/* stop Tx */
5115 		ret = lan78xx_stop_tx_path(dev);
5116 		if (ret < 0)
5117 			goto out;
5118 
5119 		/* empty out the Rx and Tx queues */
5120 		netif_device_detach(dev->net);
5121 		lan78xx_terminate_urbs(dev);
5122 		usb_kill_urb(dev->urb_intr);
5123 
5124 		/* reattach */
5125 		netif_device_attach(dev->net);
5126 
5127 		timer_delete(&dev->stat_monitor);
5128 
5129 		if (PMSG_IS_AUTO(message)) {
5130 			ret = lan78xx_set_auto_suspend(dev);
5131 			if (ret < 0)
5132 				goto out;
5133 		} else {
5134 			struct lan78xx_priv *pdata;
5135 
5136 			pdata = (struct lan78xx_priv *)(dev->data[0]);
5137 			netif_carrier_off(dev->net);
5138 			ret = lan78xx_set_suspend(dev, pdata->wol);
5139 			if (ret < 0)
5140 				goto out;
5141 		}
5142 	} else {
5143 		/* Interface is down; don't allow WOL and PHY
5144 		 * events to wake up the host
5145 		 */
5146 		u32 buf;
5147 
5148 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
5149 
5150 		ret = lan78xx_write_reg(dev, WUCSR, 0);
5151 		if (ret < 0)
5152 			goto out;
5153 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
5154 		if (ret < 0)
5155 			goto out;
5156 
5157 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5158 		if (ret < 0)
5159 			goto out;
5160 
5161 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
5162 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
5163 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
5164 		buf |= PMT_CTL_SUS_MODE_3_;
5165 
5166 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5167 		if (ret < 0)
5168 			goto out;
5169 
5170 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5171 		if (ret < 0)
5172 			goto out;
5173 
5174 		buf |= PMT_CTL_WUPS_MASK_;
5175 
5176 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5177 		if (ret < 0)
5178 			goto out;
5179 	}
5180 
5181 	ret = 0;
5182 out:
5183 	mutex_unlock(&dev->dev_mutex);
5184 
5185 	return ret;
5186 }
5187 
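/* Editor's note: called with txq.lock held (see lan78xx_resume());
 * resubmits Tx URBs that were anchored to dev->deferred while the
 * device was asleep, or releases their buffers if the link or the
 * device has gone away.
 */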
5188 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
5189 {
5190 	bool pipe_halted = false;
5191 	struct urb *urb;
5192 
5193 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5194 		struct sk_buff *skb = urb->context;
5195 		int ret;
5196 
5197 		if (!netif_device_present(dev->net) ||
5198 		    !netif_carrier_ok(dev->net) ||
5199 		    pipe_halted) {
5200 			lan78xx_release_tx_buf(dev, skb);
5201 			continue;
5202 		}
5203 
5204 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5205 
5206 		if (ret == 0) {
5207 			netif_trans_update(dev->net);
5208 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5209 		} else {
5210 			if (ret == -EPIPE) {
5211 				netif_stop_queue(dev->net);
5212 				pipe_halted = true;
5213 			} else if (ret == -ENODEV) {
5214 				netif_device_detach(dev->net);
5215 			}
5216 
5217 			lan78xx_release_tx_buf(dev, skb);
5218 		}
5219 	}
5220 
5221 	return pipe_halted;
5222 }
5223 
5224 static int lan78xx_resume(struct usb_interface *intf)
5225 {
5226 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5227 	bool dev_open;
5228 	int ret;
5229 
5230 	mutex_lock(&dev->dev_mutex);
5231 
5232 	netif_dbg(dev, ifup, dev->net, "resuming device\n");
5233 
5234 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5235 
5236 	if (dev_open) {
5237 		bool pipe_halted = false;
5238 
5239 		ret = lan78xx_flush_tx_fifo(dev);
5240 		if (ret < 0)
5241 			goto out;
5242 
5243 		if (dev->urb_intr) {
5244 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
5245 
5246 			if (ret < 0) {
5247 				if (ret == -ENODEV)
5248 					netif_device_detach(dev->net);
5249 				netdev_warn(dev->net, "Failed to submit intr URB\n");
5250 			}
5251 		}
5252 
5253 		spin_lock_irq(&dev->txq.lock);
5254 
5255 		if (netif_device_present(dev->net)) {
5256 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
5257 
5258 			if (pipe_halted)
5259 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5260 		}
5261 
5262 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5263 
5264 		spin_unlock_irq(&dev->txq.lock);
5265 
5266 		if (!pipe_halted &&
5267 		    netif_device_present(dev->net) &&
5268 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5269 			netif_start_queue(dev->net);
5270 
5271 		ret = lan78xx_start_tx_path(dev);
5272 		if (ret < 0)
5273 			goto out;
5274 
5275 		napi_schedule(&dev->napi);
5276 
5277 		if (!timer_pending(&dev->stat_monitor)) {
5278 			dev->delta = 1;
5279 			mod_timer(&dev->stat_monitor,
5280 				  jiffies + STAT_UPDATE_TIMER);
5281 		}
5282 
5283 	} else {
5284 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5285 	}
5286 
5287 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
5288 	if (ret < 0)
5289 		goto out;
5290 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5291 	if (ret < 0)
5292 		goto out;
5293 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5294 	if (ret < 0)
5295 		goto out;
5296 
5297 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5298 					     WUCSR2_ARP_RCD_ |
5299 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5300 					     WUCSR2_IPV4_TCPSYN_RCD_);
5301 	if (ret < 0)
5302 		goto out;
5303 
5304 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5305 					    WUCSR_EEE_RX_WAKE_ |
5306 					    WUCSR_PFDA_FR_ |
5307 					    WUCSR_RFE_WAKE_FR_ |
5308 					    WUCSR_WUFR_ |
5309 					    WUCSR_MPR_ |
5310 					    WUCSR_BCST_FR_);
5311 	if (ret < 0)
5312 		goto out;
5313 
5314 	ret = 0;
5315 out:
5316 	mutex_unlock(&dev->dev_mutex);
5317 
5318 	return ret;
5319 }
5320 
5321 static int lan78xx_reset_resume(struct usb_interface *intf)
5322 {
5323 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5324 	int ret;
5325 
5326 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device\n");
5327 
5328 	ret = lan78xx_reset(dev);
5329 	if (ret < 0)
5330 		return ret;
5331 
5332 	ret = lan78xx_resume(intf);
5333 	if (ret < 0)
5334 		return ret;
5335 
5336 	rtnl_lock();
5337 	phylink_resume(dev->phylink);
5338 	rtnl_unlock();
5339 
5340 	return 0;
5341 }
5342 
5343 static const struct usb_device_id products[] = {
5344 	{
5345 	/* LAN7800 USB Gigabit Ethernet Device */
5346 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5347 	},
5348 	{
5349 	/* LAN7850 USB Gigabit Ethernet Device */
5350 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5351 	},
5352 	{
5353 	/* LAN7801 USB Gigabit Ethernet Device */
5354 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5355 	},
5356 	{
5357 	/* AT29M2-AF USB Gigabit Ethernet Device */
5358 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5359 	},
5360 	{},
5361 };
5362 MODULE_DEVICE_TABLE(usb, products);
5363 
5364 static struct usb_driver lan78xx_driver = {
5365 	.name			= DRIVER_NAME,
5366 	.id_table		= products,
5367 	.probe			= lan78xx_probe,
5368 	.disconnect		= lan78xx_disconnect,
5369 	.suspend		= lan78xx_suspend,
5370 	.resume			= lan78xx_resume,
5371 	.reset_resume		= lan78xx_reset_resume,
5372 	.supports_autosuspend	= 1,
5373 	.disable_hub_initiated_lpm = 1,
5374 };
5375 
5376 module_usb_driver(lan78xx_driver);
5377 
5378 MODULE_AUTHOR(DRIVER_AUTHOR);
5379 MODULE_DESCRIPTION(DRIVER_DESC);
5380 MODULE_LICENSE("GPL");
5381