xref: /linux/drivers/net/usb/lan78xx.c (revision 09d7ff0694ea133c50ad905fd6e548c13f8af458)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
/* Module identity strings reported through the MODULE_* macros. */
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

/* netdev watchdog / URB throttling / unlink timing. */
#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

/* USB bulk endpoint max packet sizes per bus speed. */
#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

/* On-chip FIFO capacities (bytes). */
#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

/* Flow-control thresholds are programmed in 512-byte units, 7 bits wide;
 * FLOW_CTRL_THRESHOLD packs the "on" level into bits 6:0 and the "off"
 * level into bits 14:8.
 */
#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024

/* Default hardware configuration and feature-enable policy. */
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_ALIGNMENT			(4)
#define RXW_PADDING			2

/* USB IDs matched by this driver, plus EEPROM/OTP image magics. */
#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID	(0x0012)

/* Direction selectors for MII register access (see mii_access()). */
#define	MII_READ			1
#define	MII_WRITE			0

/* EEPROM/OTP layout markers and limits. */
#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

/* All Wake-on-LAN triggers the device supports. */
#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* Number of preallocated TX URBs (same count for every bus speed). */
#define TX_URB_NUM			10
#define TX_SS_URB_NUM			TX_URB_NUM
#define TX_HS_URB_NUM			TX_URB_NUM
#define TX_FS_URB_NUM			TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet
 */
#define TX_SS_URB_SIZE			(32 * 1024)
#define TX_HS_URB_SIZE			(16 * 1024)
#define TX_FS_URB_SIZE			(10 * 1024)

/* RX URB counts/sizes per bus speed; RX buffers mirror the TX sizes. */
#define RX_SS_URB_NUM			30
#define RX_HS_URB_NUM			10
#define RX_FS_URB_NUM			10
#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
#define RX_FS_URB_SIZE			TX_FS_URB_SIZE

/* Per-speed burst cap and bulk-in delay programmed into the chip. */
#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY		0x2000
#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY		0x2000
#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY		0x2000

/* TX frames are prefixed with an 8-byte command header. */
#define TX_CMD_LEN			8
#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)

/* RX frames carry a 10-byte command header in front of the Ethernet frame. */
#define RX_CMD_LEN			10
#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)
175 
/* ethtool -S statistics names; order must match the field order of
 * struct lan78xx_statstage below, one string per counter.
 * NOTE(review): these strings are userspace-visible ABI — do not edit
 * (including the odd "Rx" capitalization in "Rx Fragment Errors").
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
225 
/* Raw 32-bit hardware statistics block, read in one vendor control
 * transfer (see lan78xx_read_stats()).  Field order mirrors the device's
 * counter layout and lan78xx_gstrings[]; lan78xx_update_stats() walks
 * this struct as a flat u32 array, so fields must stay in this exact
 * order with no padding/holes.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
275 
/* 64-bit accumulated statistics (hardware counter value plus rollover
 * compensation, computed in lan78xx_update_stats()).  Must stay
 * field-for-field parallel to struct lan78xx_statstage: the update code
 * indexes both as flat arrays.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
325 
/* Register addresses snapshotted for register dumps — presumably consumed
 * by the ethtool get_regs path; verify against the ethtool ops (outside
 * this chunk) before reordering.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
347 
/* Size (bytes) of a 32-register PHY page, for register-dump buffers. */
#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

/* Driver-private state hung off lan78xx_net::driver_priv: receive-filter
 * configuration plus the deferred-work contexts that program it.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;	/* back-pointer to owning device */
	u32 rfe_ctl;			/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];	/* VLAN membership bitmap */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;	/* defers filter programming */
	struct work_struct set_vlan;		/* defers VLAN table writes */
	u32 wol;			/* cached Wake-on-LAN option mask */
};
364 
/* Lifecycle state of a URB-backed skb as it moves between the free pools
 * and the in-flight/done queues.
 */
enum skb_state {
	illegal = 0,	/* never valid; catches uninitialized entries */
	tx_start,	/* submitted on the bulk-out pipe */
	tx_done,	/* TX completion has run */
	rx_start,	/* submitted on the bulk-in pipe */
	rx_done,	/* RX completion has run; awaiting NAPI processing */
	rx_cleanup,	/* being torn down rather than processed */
	unlink_start	/* usb_unlink_urb() issued, completion pending */
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB permanently paired with this skb */
	struct lan78xx_net *dev;	/* owning device, for completions */
	enum skb_state state;
	size_t length;		/* payload length for this transfer */
	int num_of_packet;	/* frames batched into this URB */
};
382 
/* Bit numbers in lan78xx_net::flags; set by completions/timers and
 * serviced by the deferred-work handler.
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10

/* Statistics bookkeeping: last raw hardware sample, per-counter rollover
 * counts/maxima, and the accumulated 64-bit totals.  All fields are
 * guarded by access_lock.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

/* State for the PHY interrupt controller exposed via an irq_domain. */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* virq handed to the PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* cached interrupt enable mask */
	struct mutex		irq_lock;		/* for irq bus access */
};
411 
/* Per-device driver context, allocated alongside the net_device. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* struct lan78xx_priv */

	unsigned int		tx_pend_data_len; /* bytes queued in txq_pend */
	/* Pool geometry, chosen per bus speed at setup time. */
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	/* Buffer pools and in-flight/completed queues (see skb_state). */
	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;	/* deferred EVENT_* servicing */

	int			msg_enable;	/* netif message level bitmap */

	struct urb		*urb_intr;	/* interrupt endpoint URB */
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		mdiobus_mutex; /* for MDIO bus access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	/* Values programmed into BULK_IN_DLY / BURST_CAP registers. */
	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;	/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;	/* bulk-out max packet size */
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* from ID_REV register */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	/* Flow-control negotiation state. */
	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;	/* stat_monitor interval multiplier */
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
474 
/* use ethtool to change the level for any given device */
/* -1 means "use the netif default" until overridden at module load. */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
479 
480 static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
481 {
482 	if (skb_queue_empty(buf_pool))
483 		return NULL;
484 
485 	return skb_dequeue(buf_pool);
486 }
487 
488 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
489 				struct sk_buff *buf)
490 {
491 	buf->data = buf->head;
492 	skb_reset_tail_pointer(buf);
493 
494 	buf->len = 0;
495 	buf->data_len = 0;
496 
497 	skb_queue_tail(buf_pool, buf);
498 }
499 
500 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
501 {
502 	struct skb_data *entry;
503 	struct sk_buff *buf;
504 
505 	while (!skb_queue_empty(buf_pool)) {
506 		buf = skb_dequeue(buf_pool);
507 		if (buf) {
508 			entry = (struct skb_data *)buf->cb;
509 			usb_free_urb(entry->urb);
510 			dev_kfree_skb_any(buf);
511 		}
512 	}
513 }
514 
515 static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
516 				  size_t n_urbs, size_t urb_size,
517 				  struct lan78xx_net *dev)
518 {
519 	struct skb_data *entry;
520 	struct sk_buff *buf;
521 	struct urb *urb;
522 	int i;
523 
524 	skb_queue_head_init(buf_pool);
525 
526 	for (i = 0; i < n_urbs; i++) {
527 		buf = alloc_skb(urb_size, GFP_ATOMIC);
528 		if (!buf)
529 			goto error;
530 
531 		if (skb_linearize(buf) != 0) {
532 			dev_kfree_skb_any(buf);
533 			goto error;
534 		}
535 
536 		urb = usb_alloc_urb(0, GFP_ATOMIC);
537 		if (!urb) {
538 			dev_kfree_skb_any(buf);
539 			goto error;
540 		}
541 
542 		entry = (struct skb_data *)buf->cb;
543 		entry->urb = urb;
544 		entry->dev = dev;
545 		entry->length = 0;
546 		entry->num_of_packet = 0;
547 
548 		skb_queue_tail(buf_pool, buf);
549 	}
550 
551 	return 0;
552 
553 error:
554 	lan78xx_free_buf_pool(buf_pool);
555 
556 	return -ENOMEM;
557 }
558 
/* Take a free RX buffer, or NULL if none are available. */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}
563 
/* Return @rx_buf to the RX free pool after resetting it. */
static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}
569 
/* Free every RX buffer and its paired URB. */
static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}
574 
/* Preallocate the RX buffer pool; returns 0 or -ENOMEM. */
static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
580 
/* Take a free TX buffer, or NULL if none are available. */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}
585 
/* Return @tx_buf to the TX free pool after resetting it. */
static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}
591 
/* Free every TX buffer and its paired URB. */
static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}
596 
/* Preallocate the TX buffer pool; returns 0 or -ENOMEM. */
static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
602 
603 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
604 {
605 	u32 *buf;
606 	int ret;
607 
608 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
609 		return -ENODEV;
610 
611 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
612 	if (!buf)
613 		return -ENOMEM;
614 
615 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
616 			      USB_VENDOR_REQUEST_READ_REGISTER,
617 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
618 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
619 	if (likely(ret >= 0)) {
620 		le32_to_cpus(buf);
621 		*data = *buf;
622 	} else if (net_ratelimit()) {
623 		netdev_warn(dev->net,
624 			    "Failed to read register index 0x%08x. ret = %pe",
625 			    index, ERR_PTR(ret));
626 	}
627 
628 	kfree(buf);
629 
630 	return ret < 0 ? ret : 0;
631 }
632 
633 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
634 {
635 	u32 *buf;
636 	int ret;
637 
638 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
639 		return -ENODEV;
640 
641 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
642 	if (!buf)
643 		return -ENOMEM;
644 
645 	*buf = data;
646 	cpu_to_le32s(buf);
647 
648 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
649 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
650 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
651 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
652 	if (unlikely(ret < 0) &&
653 	    net_ratelimit()) {
654 		netdev_warn(dev->net,
655 			    "Failed to write register index 0x%08x. ret = %pe",
656 			    index, ERR_PTR(ret));
657 	}
658 
659 	kfree(buf);
660 
661 	return ret < 0 ? ret : 0;
662 }
663 
664 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
665 			      u32 data)
666 {
667 	int ret;
668 	u32 buf;
669 
670 	ret = lan78xx_read_reg(dev, reg, &buf);
671 	if (ret < 0)
672 		return ret;
673 
674 	buf &= ~mask;
675 	buf |= (mask & data);
676 
677 	return lan78xx_write_reg(dev, reg, buf);
678 }
679 
680 static int lan78xx_read_stats(struct lan78xx_net *dev,
681 			      struct lan78xx_statstage *data)
682 {
683 	int ret = 0;
684 	int i;
685 	struct lan78xx_statstage *stats;
686 	u32 *src;
687 	u32 *dst;
688 
689 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
690 	if (!stats)
691 		return -ENOMEM;
692 
693 	ret = usb_control_msg(dev->udev,
694 			      usb_rcvctrlpipe(dev->udev, 0),
695 			      USB_VENDOR_REQUEST_GET_STATS,
696 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
697 			      0,
698 			      0,
699 			      (void *)stats,
700 			      sizeof(*stats),
701 			      USB_CTRL_SET_TIMEOUT);
702 	if (likely(ret >= 0)) {
703 		src = (u32 *)stats;
704 		dst = (u32 *)data;
705 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
706 			le32_to_cpus(&src[i]);
707 			dst[i] = src[i];
708 		}
709 	} else {
710 		netdev_warn(dev->net,
711 			    "Failed to read stat ret = %d", ret);
712 	}
713 
714 	kfree(stats);
715 
716 	return ret;
717 }
718 
/* If the fresh hardware counter is smaller than the previously saved
 * sample, the 32-bit counter wrapped; bump its per-field rollover count.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)

/* Compare every counter in @stats against the last saved sample to detect
 * 32-bit wraparound, then remember @stats as the new baseline.  Caller
 * must hold dev->stats.access_lock (see lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* New baseline for the next rollover comparison. */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
778 
/* Refresh dev->stats.curr_stat: sample the hardware counters, account for
 * 32-bit rollover, and fold the result into the 64-bit totals.
 *
 * Silently does nothing if the device cannot be resumed.  Walks the
 * statstage structs as flat u32/u64 arrays, which relies on the structs
 * being field-for-field parallel with no padding.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* read_stats() returns bytes transferred (> 0) on success; on
	 * failure, lan78xx_stats keeps stale/garbage values but the
	 * rollover baseline is not updated.
	 */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* total = current sample + rollovers * (counter range). */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
806 
/* Set the @hw_enable bit(s) in @reg to start a hardware block. */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
811 
812 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
813 			   u32 hw_disabled)
814 {
815 	unsigned long timeout;
816 	bool stopped = true;
817 	int ret;
818 	u32 buf;
819 
820 	/* Stop the h/w block (if not already stopped) */
821 
822 	ret = lan78xx_read_reg(dev, reg, &buf);
823 	if (ret < 0)
824 		return ret;
825 
826 	if (buf & hw_enabled) {
827 		buf &= ~hw_enabled;
828 
829 		ret = lan78xx_write_reg(dev, reg, buf);
830 		if (ret < 0)
831 			return ret;
832 
833 		stopped = false;
834 		timeout = jiffies + HW_DISABLE_TIMEOUT;
835 		do  {
836 			ret = lan78xx_read_reg(dev, reg, &buf);
837 			if (ret < 0)
838 				return ret;
839 
840 			if (buf & hw_disabled)
841 				stopped = true;
842 			else
843 				msleep(HW_DISABLE_DELAY_MS);
844 		} while (!stopped && !time_after(jiffies, timeout));
845 	}
846 
847 	return stopped ? 0 : -ETIMEDOUT;
848 }
849 
/* Set the self-clearing @fifo_flush bit(s) in @reg to reset a FIFO. */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
854 
855 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
856 {
857 	int ret;
858 
859 	netif_dbg(dev, drv, dev->net, "start tx path");
860 
861 	/* Start the MAC transmitter */
862 
863 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
864 	if (ret < 0)
865 		return ret;
866 
867 	/* Start the Tx FIFO */
868 
869 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
870 	if (ret < 0)
871 		return ret;
872 
873 	return 0;
874 }
875 
876 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
877 {
878 	int ret;
879 
880 	netif_dbg(dev, drv, dev->net, "stop tx path");
881 
882 	/* Stop the Tx FIFO */
883 
884 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
885 	if (ret < 0)
886 		return ret;
887 
888 	/* Stop the MAC transmitter */
889 
890 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
891 	if (ret < 0)
892 		return ret;
893 
894 	return 0;
895 }
896 
/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	/* FCT_TX_CTL_RST_ resets (flushes) the TX FIFO. */
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
904 
905 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
906 {
907 	int ret;
908 
909 	netif_dbg(dev, drv, dev->net, "start rx path");
910 
911 	/* Start the Rx FIFO */
912 
913 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
914 	if (ret < 0)
915 		return ret;
916 
917 	/* Start the MAC receiver*/
918 
919 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
920 	if (ret < 0)
921 		return ret;
922 
923 	return 0;
924 }
925 
926 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
927 {
928 	int ret;
929 
930 	netif_dbg(dev, drv, dev->net, "stop rx path");
931 
932 	/* Stop the MAC receiver */
933 
934 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
935 	if (ret < 0)
936 		return ret;
937 
938 	/* Stop the Rx FIFO */
939 
940 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
941 	if (ret < 0)
942 		return ret;
943 
944 	return 0;
945 }
946 
/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	/* FCT_RX_CTL_RST_ resets (flushes) the RX FIFO. */
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
954 
955 /* Loop until the read is completed with timeout called with mdiobus_mutex held */
956 static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
957 {
958 	unsigned long start_time = jiffies;
959 	u32 val;
960 	int ret;
961 
962 	do {
963 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
964 		if (ret < 0)
965 			return ret;
966 
967 		if (!(val & MII_ACC_MII_BUSY_))
968 			return 0;
969 	} while (!time_after(jiffies, start_time + HZ));
970 
971 	return -ETIMEDOUT;
972 }
973 
974 static inline u32 mii_access(int id, int index, int read)
975 {
976 	u32 ret;
977 
978 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
979 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
980 	if (read)
981 		ret |= MII_ACC_MII_READ_;
982 	else
983 		ret |= MII_ACC_MII_WRITE_;
984 	ret |= MII_ACC_MII_BUSY_;
985 
986 	return ret;
987 }
988 
989 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
990 {
991 	unsigned long start_time = jiffies;
992 	u32 val;
993 	int ret;
994 
995 	do {
996 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
997 		if (ret < 0)
998 			return ret;
999 
1000 		if (!(val & E2P_CMD_EPC_BUSY_) ||
1001 		    (val & E2P_CMD_EPC_TIMEOUT_))
1002 			break;
1003 		usleep_range(40, 100);
1004 	} while (!time_after(jiffies, start_time + HZ));
1005 
1006 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
1007 		netdev_warn(dev->net, "EEPROM read operation timeout");
1008 		return -ETIMEDOUT;
1009 	}
1010 
1011 	return 0;
1012 }
1013 
1014 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
1015 {
1016 	unsigned long start_time = jiffies;
1017 	u32 val;
1018 	int ret;
1019 
1020 	do {
1021 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
1022 		if (ret < 0)
1023 			return ret;
1024 
1025 		if (!(val & E2P_CMD_EPC_BUSY_))
1026 			return 0;
1027 
1028 		usleep_range(40, 100);
1029 	} while (!time_after(jiffies, start_time + HZ));
1030 
1031 	netdev_warn(dev->net, "EEPROM is busy");
1032 	return -ETIMEDOUT;
1033 }
1034 
/* Read @length bytes starting at @offset from the EEPROM into @data,
 * one byte per E2P command.
 *
 * On LAN7800 the EEPROM pins are muxed with the LEDs, so LED mode is
 * disabled for the duration and restored afterwards.  A device-side
 * timeout (-ETIMEDOUT) still restores LED state; a USB transport error
 * returns immediately without restoring, since further register writes
 * would also fail.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* One READ command per byte; address auto-increments below. */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		/* Only the low byte of E2P_DATA is meaningful. */
		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	/* Restore the original LED configuration on LAN7800. */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}
1092 
1093 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
1094 			       u32 length, u8 *data)
1095 {
1096 	int ret;
1097 	u8 sig;
1098 
1099 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
1100 	if (ret < 0)
1101 		return ret;
1102 
1103 	if (sig != EEPROM_INDICATOR)
1104 		return -ENODATA;
1105 
1106 	return lan78xx_read_raw_eeprom(dev, offset, length, data);
1107 }
1108 
/* Write @length bytes from @data to the EEPROM starting at @offset.
 *
 * Mirrors lan78xx_read_raw_eeprom(): LAN7800 LED pins are muxed with the
 * EEPROM pins, so LEDs are disabled during the access and restored at the
 * end.  An EEPROM-side timeout still restores the LED state; a USB error
 * returns immediately.  Returns 0 on success or a negative error code.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* One data-load + write-command round-trip per byte. */
	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}
1184 
/* Read @length bytes from the OTP memory starting at @offset into @data.
 *
 * Powers up the OTP block if it is in power-down, then reads one byte per
 * command cycle (address registers, READ function, GO, poll status).
 * Both the power-up and per-byte status polls are bounded by a 1 second
 * timeout.  Returns 0 on success or a negative error code.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* 16-bit OTP address is split across two registers. */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		/* Poll until the OTP state machine finishes this byte. */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1259 
/* Program @length bytes from @data into the OTP memory at @offset.
 *
 * Powers up the OTP block if needed, selects BYTE program mode, then
 * programs and verifies one byte per command cycle.  Note that OTP cells
 * can only be programmed once; callers are expected to validate the data
 * first.  Returns 0 on success or a negative error code.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* 16-bit OTP address is split across two registers. */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		/* Program-and-verify command, then kick the state machine. */
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1338 
1339 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1340 			    u32 length, u8 *data)
1341 {
1342 	u8 sig;
1343 	int ret;
1344 
1345 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1346 
1347 	if (ret == 0) {
1348 		if (sig == OTP_INDICATOR_2)
1349 			offset += 0x100;
1350 		else if (sig != OTP_INDICATOR_1)
1351 			ret = -EINVAL;
1352 		if (!ret)
1353 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1354 	}
1355 
1356 	return ret;
1357 }
1358 
1359 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1360 {
1361 	int i, ret;
1362 
1363 	for (i = 0; i < 100; i++) {
1364 		u32 dp_sel;
1365 
1366 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1367 		if (unlikely(ret < 0))
1368 			return ret;
1369 
1370 		if (dp_sel & DP_SEL_DPRDY_)
1371 			return 0;
1372 
1373 		usleep_range(40, 100);
1374 	}
1375 
1376 	netdev_warn(dev->net, "%s timed out", __func__);
1377 
1378 	return -ETIMEDOUT;
1379 }
1380 
/* Write @length 32-bit words from @buf into internal RAM selected by
 * @ram_select, starting at word address @addr, via the dataport.
 *
 * Takes a USB autopm reference (the register writes need the device
 * awake) and serializes against other dataport users with
 * pdata->dataport_mutex.  Returns 0 on success or a negative error code.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	/* Select which internal RAM the dataport addresses. */
	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	/* One address/data/command cycle per word, waiting for ready
	 * between writes.
	 */
	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1428 
1429 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1430 				    int index, u8 addr[ETH_ALEN])
1431 {
1432 	u32 temp;
1433 
1434 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1435 		temp = addr[3];
1436 		temp = addr[2] | (temp << 8);
1437 		temp = addr[1] | (temp << 8);
1438 		temp = addr[0] | (temp << 8);
1439 		pdata->pfilter_table[index][1] = temp;
1440 		temp = addr[5];
1441 		temp = addr[4] | (temp << 8);
1442 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1443 		pdata->pfilter_table[index][0] = temp;
1444 	}
1445 }
1446 
1447 /* returns hash bit number for given MAC address */
1448 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1449 {
1450 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1451 }
1452 
1453 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1454 {
1455 	struct lan78xx_priv *pdata =
1456 			container_of(param, struct lan78xx_priv, set_multicast);
1457 	struct lan78xx_net *dev = pdata->dev;
1458 	int i, ret;
1459 
1460 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1461 		  pdata->rfe_ctl);
1462 
1463 	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
1464 				     DP_SEL_VHF_VLAN_LEN,
1465 				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1466 	if (ret < 0)
1467 		goto multicast_write_done;
1468 
1469 	for (i = 1; i < NUM_OF_MAF; i++) {
1470 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1471 		if (ret < 0)
1472 			goto multicast_write_done;
1473 
1474 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1475 					pdata->pfilter_table[i][1]);
1476 		if (ret < 0)
1477 			goto multicast_write_done;
1478 
1479 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1480 					pdata->pfilter_table[i][0]);
1481 		if (ret < 0)
1482 			goto multicast_write_done;
1483 	}
1484 
1485 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1486 
1487 multicast_write_done:
1488 	if (ret < 0)
1489 		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
1490 	return;
1491 }
1492 
1493 static void lan78xx_set_multicast(struct net_device *netdev)
1494 {
1495 	struct lan78xx_net *dev = netdev_priv(netdev);
1496 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1497 	unsigned long flags;
1498 	int i;
1499 
1500 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1501 
1502 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1503 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1504 
1505 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1506 		pdata->mchash_table[i] = 0;
1507 
1508 	/* pfilter_table[0] has own HW address */
1509 	for (i = 1; i < NUM_OF_MAF; i++) {
1510 		pdata->pfilter_table[i][0] = 0;
1511 		pdata->pfilter_table[i][1] = 0;
1512 	}
1513 
1514 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1515 
1516 	if (dev->net->flags & IFF_PROMISC) {
1517 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1518 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1519 	} else {
1520 		if (dev->net->flags & IFF_ALLMULTI) {
1521 			netif_dbg(dev, drv, dev->net,
1522 				  "receive all multicast enabled");
1523 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1524 		}
1525 	}
1526 
1527 	if (netdev_mc_count(dev->net)) {
1528 		struct netdev_hw_addr *ha;
1529 		int i;
1530 
1531 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1532 
1533 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1534 
1535 		i = 1;
1536 		netdev_for_each_mc_addr(ha, netdev) {
1537 			/* set first 32 into Perfect Filter */
1538 			if (i < 33) {
1539 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1540 			} else {
1541 				u32 bitnum = lan78xx_hash(ha->addr);
1542 
1543 				pdata->mchash_table[bitnum / 32] |=
1544 							(1 << (bitnum % 32));
1545 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1546 			}
1547 			i++;
1548 		}
1549 	}
1550 
1551 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1552 
1553 	/* defer register writes to a sleepable context */
1554 	schedule_work(&pdata->set_multicast);
1555 }
1556 
1557 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
1558 					 bool tx_pause, bool rx_pause);
1559 
/* Resolve the flow-control capabilities to apply after a link change:
 * from autonegotiation results (@lcladv/@rmtadv) when fc_autoneg is set,
 * otherwise from the user-requested configuration.
 *
 * NOTE(review): @duplex is currently unused here — flow control is
 * resolved from the advertisement registers only; kept for the caller's
 * signature.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	return lan78xx_configure_flowcontrol(dev,
					     cap & FLOW_CTRL_TX,
					     cap & FLOW_CTRL_RX);
}
1578 
1579 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1580 
/* Reset the MAC via MAC_CR_RST_ and wait (up to 1 second) for the
 * hardware to clear the bit again.  Holds mdiobus_mutex for the whole
 * operation and first drains any in-flight MDIO transaction — resetting
 * while the MDIO bus is busy can lock up the MAC interface.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset never completes, or a
 * negative USB error.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}
1626 
1627 /**
1628  * lan78xx_phy_int_ack - Acknowledge PHY interrupt
1629  * @dev: pointer to the LAN78xx device structure
1630  *
1631  * This function acknowledges the PHY interrupt by setting the
1632  * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
1633  *
1634  * Return: 0 on success or a negative error code on failure.
1635  */
1636 static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
1637 {
1638 	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1639 }
1640 
1641 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed);
1642 
/* Handle a PHY link-state change (scheduled from the interrupt URB).
 *
 * Acks the PHY interrupt, samples the link state under the PHY lock,
 * then either tears down (link lost: MAC reset, stop stats timer) or
 * brings up the data path (link gained: configure USB for the link
 * speed, resolve flow control, restart stats timer, resubmit RX URBs
 * and kick NAPI).  Returns 0 on success or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_phy_int_ack(dev);
	if (unlikely(ret < 0))
		return ret;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		timer_delete(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		ret = lan78xx_configure_usb(dev, ecmd.base.speed);
		if (ret < 0)
			return ret;

		/* Local and partner advertisements feed flow-control
		 * resolution below.
		 */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		local_bh_disable();
		napi_schedule(&dev->napi);
		local_bh_enable();
	}

	return 0;
}
1709 
1710 /* some work can't be done in tasklets, so we use keventd
1711  *
1712  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1713  * but tasklet_schedule() doesn't.	hope the failure is rare.
1714  */
1715 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1716 {
1717 	set_bit(work, &dev->flags);
1718 	if (!schedule_delayed_work(&dev->wq, 0))
1719 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1720 }
1721 
1722 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1723 {
1724 	u32 intdata;
1725 
1726 	if (urb->actual_length != 4) {
1727 		netdev_warn(dev->net,
1728 			    "unexpected urb length %d", urb->actual_length);
1729 		return;
1730 	}
1731 
1732 	intdata = get_unaligned_le32(urb->transfer_buffer);
1733 
1734 	if (intdata & INT_ENP_PHY_INT) {
1735 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1736 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1737 
1738 		if (dev->domain_data.phyirq > 0)
1739 			generic_handle_irq_safe(dev->domain_data.phyirq);
1740 	} else {
1741 		netdev_warn(dev->net,
1742 			    "unexpected interrupt: 0x%08x\n", intdata);
1743 	}
1744 }
1745 
/* ethtool: report the fixed maximum EEPROM size supported by the chip. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1750 
1751 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1752 				      struct ethtool_eeprom *ee, u8 *data)
1753 {
1754 	struct lan78xx_net *dev = netdev_priv(netdev);
1755 	int ret;
1756 
1757 	ret = usb_autopm_get_interface(dev->intf);
1758 	if (ret)
1759 		return ret;
1760 
1761 	ee->magic = LAN78XX_EEPROM_MAGIC;
1762 
1763 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1764 
1765 	usb_autopm_put_interface(dev->intf);
1766 
1767 	return ret;
1768 }
1769 
1770 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1771 				      struct ethtool_eeprom *ee, u8 *data)
1772 {
1773 	struct lan78xx_net *dev = netdev_priv(netdev);
1774 	int ret;
1775 
1776 	ret = usb_autopm_get_interface(dev->intf);
1777 	if (ret)
1778 		return ret;
1779 
1780 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1781 	 * to load data from EEPROM
1782 	 */
1783 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1784 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1785 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1786 		 (ee->offset == 0) &&
1787 		 (ee->len == 512) &&
1788 		 (data[0] == OTP_INDICATOR_1))
1789 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1790 
1791 	usb_autopm_put_interface(dev->intf);
1792 
1793 	return ret;
1794 }
1795 
1796 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1797 				u8 *data)
1798 {
1799 	if (stringset == ETH_SS_STATS)
1800 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1801 }
1802 
1803 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1804 {
1805 	if (sset == ETH_SS_STATS)
1806 		return ARRAY_SIZE(lan78xx_gstrings);
1807 	else
1808 		return -EOPNOTSUPP;
1809 }
1810 
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy
 * the cached snapshot out under the stats lock so readers never see a
 * half-updated set.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1822 
/* ethtool get_wol: report Wake-on-LAN support and the currently
 * configured modes.  Support is gated on the USB remote-wakeup bit in
 * USB_CFG0; if that register cannot be read, WoL is reported as
 * unsupported.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1851 
/* ethtool set_wol: record the requested wake modes, enable/disable USB
 * device wakeup accordingly, and forward the configuration to the PHY.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	/* Reject wake modes beyond what WAKE_ALL covers. */
	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	/* Any non-zero wolopts means USB remote wakeup must be armed. */
	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1879 
1880 static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
1881 {
1882 	struct lan78xx_net *dev = netdev_priv(net);
1883 	struct phy_device *phydev = net->phydev;
1884 	int ret;
1885 	u32 buf;
1886 
1887 	ret = usb_autopm_get_interface(dev->intf);
1888 	if (ret < 0)
1889 		return ret;
1890 
1891 	ret = phy_ethtool_get_eee(phydev, edata);
1892 	if (ret < 0)
1893 		goto exit;
1894 
1895 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1896 	if (buf & MAC_CR_EEE_EN_) {
1897 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1898 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1899 		edata->tx_lpi_timer = buf;
1900 	} else {
1901 		edata->tx_lpi_timer = 0;
1902 	}
1903 
1904 	ret = 0;
1905 exit:
1906 	usb_autopm_put_interface(dev->intf);
1907 
1908 	return ret;
1909 }
1910 
1911 static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
1912 {
1913 	struct lan78xx_net *dev = netdev_priv(net);
1914 	int ret;
1915 	u32 buf;
1916 
1917 	ret = usb_autopm_get_interface(dev->intf);
1918 	if (ret < 0)
1919 		return ret;
1920 
1921 	ret = phy_ethtool_set_eee(net->phydev, edata);
1922 	if (ret < 0)
1923 		goto out;
1924 
1925 	buf = (u32)edata->tx_lpi_timer;
1926 	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1927 out:
1928 	usb_autopm_put_interface(dev->intf);
1929 
1930 	return ret;
1931 }
1932 
1933 static u32 lan78xx_get_link(struct net_device *net)
1934 {
1935 	u32 link;
1936 
1937 	mutex_lock(&net->phydev->lock);
1938 	phy_read_status(net->phydev);
1939 	link = net->phydev->link;
1940 	mutex_unlock(&net->phydev->lock);
1941 
1942 	return link;
1943 }
1944 
/* ethtool get_drvinfo: report the driver name and the device's USB
 * bus path.
 */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1953 
/* ethtool get_msglevel: return the driver's netif message-enable mask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1960 
/* ethtool set_msglevel: update the driver's netif message-enable mask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1967 
1968 static int lan78xx_get_link_ksettings(struct net_device *net,
1969 				      struct ethtool_link_ksettings *cmd)
1970 {
1971 	struct lan78xx_net *dev = netdev_priv(net);
1972 	struct phy_device *phydev = net->phydev;
1973 	int ret;
1974 
1975 	ret = usb_autopm_get_interface(dev->intf);
1976 	if (ret < 0)
1977 		return ret;
1978 
1979 	phy_ethtool_ksettings_get(phydev, cmd);
1980 
1981 	usb_autopm_put_interface(dev->intf);
1982 
1983 	return ret;
1984 }
1985 
/* ethtool set_link_ksettings: apply speed/duplex/autoneg via phylib.
 * When autonegotiation is disabled, bounce the link so the partner
 * re-trains at the forced settings.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		/* NOTE(review): BMCR_LOOPBACK is toggled here to drop the
		 * link momentarily; confirm this is the intended mechanism
		 * rather than e.g. BMCR_PDOWN.
		 */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
2013 
2014 static void lan78xx_get_pause(struct net_device *net,
2015 			      struct ethtool_pauseparam *pause)
2016 {
2017 	struct lan78xx_net *dev = netdev_priv(net);
2018 	struct phy_device *phydev = net->phydev;
2019 	struct ethtool_link_ksettings ecmd;
2020 
2021 	phy_ethtool_ksettings_get(phydev, &ecmd);
2022 
2023 	pause->autoneg = dev->fc_autoneg;
2024 
2025 	if (dev->fc_request_control & FLOW_CTRL_TX)
2026 		pause->tx_pause = 1;
2027 
2028 	if (dev->fc_request_control & FLOW_CTRL_RX)
2029 		pause->rx_pause = 1;
2030 }
2031 
/* ethtool set_pauseparam: record the requested flow-control modes and,
 * when autonegotiation is active, rewrite the pause bits in the PHY's
 * advertisement so the new request is renegotiated.  Requesting pause
 * autoneg while link autoneg is off is rejected with -EINVAL.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		/* Replace the Pause/Asym_Pause advertisement bits with the
		 * ones derived from the new request, then restart autoneg.
		 */
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
2076 
/* ethtool get_regs_len: size of the register dump produced by
 * lan78xx_get_regs() (one u32 per entry in lan78xx_regs[]).
 */
static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}
2081 
/* ethtool get_regs: dump every register listed in lan78xx_regs[] into
 * @buf.  On any read failure the values collected so far are zeroed so
 * userspace never sees a partially valid dump.
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	unsigned int data_count = 0;
	u32 *data = buf;
	int i, ret;

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
		if (ret < 0) {
			netdev_warn(dev->net,
				    "failed to read register 0x%08x\n",
				    lan78xx_regs[i]);
			goto clean_data;
		}

		data_count++;
	}

	return;

clean_data:
	/* Discard the partial dump rather than return garbage. */
	memset(data, 0, data_count * sizeof(u32));
}
2109 
/* ethtool operations implemented by this driver. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
2133 };
2134 
/* Establish the device MAC address at probe time.
 *
 * Order of preference: address already programmed in RX_ADDRL/H (e.g. by
 * firmware), then platform/Device Tree, then EEPROM/OTP, and finally a
 * random address.  The chosen address is written back to the RX address
 * registers, installed as perfect-filter entry 0, and set on the netdev.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];
	int ret;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
	if (ret < 0)
		return ret;

	/* RX_ADDRL holds bytes 0..3 little-endian, RX_ADDRH bytes 4..5. */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		if (ret < 0)
			return ret;
	}

	/* Perfect-filter slot 0 always holds the device's own address. */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(dev->net, addr);

	return 0;
}
2201 
2202 /* MDIO read and write wrappers for phylib */
/* phylib mii_bus .read: read PHY register @idx of @phy_id through the
 * MAC's MII_ACC/MII_DATA window.  Serialized by mdiobus_mutex and run
 * with a USB autopm reference held.  Returns the 16-bit register value
 * or a negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	/* Wait for the transaction to complete before reading the data. */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);
	if (ret < 0)
		goto done;

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
2242 
2243 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2244 				 u16 regval)
2245 {
2246 	struct lan78xx_net *dev = bus->priv;
2247 	u32 val, addr;
2248 	int ret;
2249 
2250 	ret = usb_autopm_get_interface(dev->intf);
2251 	if (ret < 0)
2252 		return ret;
2253 
2254 	mutex_lock(&dev->mdiobus_mutex);
2255 
2256 	/* confirm MII not busy */
2257 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2258 	if (ret < 0)
2259 		goto done;
2260 
2261 	val = (u32)regval;
2262 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2263 	if (ret < 0)
2264 		goto done;
2265 
2266 	/* set the address, index & direction (write to PHY) */
2267 	addr = mii_access(phy_id, idx, MII_WRITE);
2268 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2269 	if (ret < 0)
2270 		goto done;
2271 
2272 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2273 	if (ret < 0)
2274 		goto done;
2275 
2276 done:
2277 	mutex_unlock(&dev->mdiobus_mutex);
2278 	usb_autopm_put_interface(dev->intf);
2279 	return ret;
2280 }
2281 
2282 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2283 {
2284 	struct device_node *node;
2285 	int ret;
2286 
2287 	dev->mdiobus = mdiobus_alloc();
2288 	if (!dev->mdiobus) {
2289 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2290 		return -ENOMEM;
2291 	}
2292 
2293 	dev->mdiobus->priv = (void *)dev;
2294 	dev->mdiobus->read = lan78xx_mdiobus_read;
2295 	dev->mdiobus->write = lan78xx_mdiobus_write;
2296 	dev->mdiobus->name = "lan78xx-mdiobus";
2297 	dev->mdiobus->parent = &dev->udev->dev;
2298 
2299 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2300 		 dev->udev->bus->busnum, dev->udev->devnum);
2301 
2302 	switch (dev->chipid) {
2303 	case ID_REV_CHIP_ID_7800_:
2304 	case ID_REV_CHIP_ID_7850_:
2305 		/* set to internal PHY id */
2306 		dev->mdiobus->phy_mask = ~(1 << 1);
2307 		break;
2308 	case ID_REV_CHIP_ID_7801_:
2309 		/* scan thru PHYAD[2..0] */
2310 		dev->mdiobus->phy_mask = ~(0xFF);
2311 		break;
2312 	}
2313 
2314 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2315 	ret = of_mdiobus_register(dev->mdiobus, node);
2316 	of_node_put(node);
2317 	if (ret) {
2318 		netdev_err(dev->net, "can't register MDIO bus\n");
2319 		goto exit1;
2320 	}
2321 
2322 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2323 	return 0;
2324 exit1:
2325 	mdiobus_free(dev->mdiobus);
2326 	return ret;
2327 }
2328 
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2334 
2335 static void lan78xx_link_status_change(struct net_device *net)
2336 {
2337 	struct lan78xx_net *dev = netdev_priv(net);
2338 	struct phy_device *phydev = net->phydev;
2339 	u32 data;
2340 	int ret;
2341 
2342 	ret = lan78xx_read_reg(dev, MAC_CR, &data);
2343 	if (ret < 0)
2344 		return;
2345 
2346 	if (phydev->enable_tx_lpi)
2347 		data |=  MAC_CR_EEE_EN_;
2348 	else
2349 		data &= ~MAC_CR_EEE_EN_;
2350 	lan78xx_write_reg(dev, MAC_CR, data);
2351 
2352 	phy_print_status(phydev);
2353 }
2354 
2355 static int irq_map(struct irq_domain *d, unsigned int irq,
2356 		   irq_hw_number_t hwirq)
2357 {
2358 	struct irq_domain_data *data = d->host_data;
2359 
2360 	irq_set_chip_data(irq, data);
2361 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2362 	irq_set_noprobe(irq);
2363 
2364 	return 0;
2365 }
2366 
/* Undo irq_map(): detach handler, chip and chip data from the virq. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2372 
/* irq_domain callbacks for the device's interrupt-endpoint sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2377 
2378 static void lan78xx_irq_mask(struct irq_data *irqd)
2379 {
2380 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2381 
2382 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2383 }
2384 
2385 static void lan78xx_irq_unmask(struct irq_data *irqd)
2386 {
2387 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2388 
2389 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2390 }
2391 
2392 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2393 {
2394 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2395 
2396 	mutex_lock(&data->irq_lock);
2397 }
2398 
/* Flush the cached interrupt-enable mask to the INT_EP_CTL register. */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* Register access (USB transfers) happens here because
	 * irq_bus_lock and irq_bus_sync_unlock are the only two
	 * callbacks executed in a non-atomic (sleepable) context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		goto irq_bus_sync_unlock;

	/* write only when the hardware state differs from the cache */
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

irq_bus_sync_unlock:
	if (ret < 0)
		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
			   ERR_PTR(ret));

	mutex_unlock(&data->irq_lock);
}
2424 
/* irq_chip whose mask/unmask only touch a cached enable word; the cache
 * is written to hardware in lan78xx_irq_bus_sync_unlock(), where USB
 * register access is allowed.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2432 
2433 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2434 {
2435 	struct device_node *of_node;
2436 	struct irq_domain *irqdomain;
2437 	unsigned int irqmap = 0;
2438 	u32 buf;
2439 	int ret = 0;
2440 
2441 	of_node = dev->udev->dev.parent->of_node;
2442 
2443 	mutex_init(&dev->domain_data.irq_lock);
2444 
2445 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2446 	if (ret < 0)
2447 		return ret;
2448 
2449 	dev->domain_data.irqenable = buf;
2450 
2451 	dev->domain_data.irqchip = &lan78xx_irqchip;
2452 	dev->domain_data.irq_handler = handle_simple_irq;
2453 
2454 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2455 					  &chip_domain_ops, &dev->domain_data);
2456 	if (irqdomain) {
2457 		/* create mapping for PHY interrupt */
2458 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2459 		if (!irqmap) {
2460 			irq_domain_remove(irqdomain);
2461 
2462 			irqdomain = NULL;
2463 			ret = -EINVAL;
2464 		}
2465 	} else {
2466 		ret = -EINVAL;
2467 	}
2468 
2469 	dev->domain_data.irqdomain = irqdomain;
2470 	dev->domain_data.phyirq = irqmap;
2471 
2472 	return ret;
2473 }
2474 
2475 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2476 {
2477 	if (dev->domain_data.phyirq > 0) {
2478 		irq_dispose_mapping(dev->domain_data.phyirq);
2479 
2480 		if (dev->domain_data.irqdomain)
2481 			irq_domain_remove(dev->domain_data.irqdomain);
2482 	}
2483 	dev->domain_data.phyirq = 0;
2484 	dev->domain_data.irqdomain = NULL;
2485 }
2486 
2487 /**
2488  * lan78xx_configure_usb - Configure USB link power settings
2489  * @dev: pointer to the LAN78xx device structure
2490  * @speed: negotiated Ethernet link speed (in Mbps)
2491  *
2492  * This function configures U1/U2 link power management for SuperSpeed
2493  * USB devices based on the current Ethernet link speed. It uses the
2494  * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2495  *
2496  * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2497  *       LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2498  *
2499  * Return: 0 on success or a negative error code on failure.
2500  */
2501 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2502 {
2503 	u32 mask, val;
2504 	int ret;
2505 
2506 	/* Only configure USB settings for SuperSpeed devices */
2507 	if (dev->udev->speed != USB_SPEED_SUPER)
2508 		return 0;
2509 
2510 	/* LAN7850 does not support USB 3.x */
2511 	if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2512 		netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2513 		return 0;
2514 	}
2515 
2516 	switch (speed) {
2517 	case SPEED_1000:
2518 		/* Disable U2, enable U1 */
2519 		ret = lan78xx_update_reg(dev, USB_CFG1,
2520 					 USB_CFG1_DEV_U2_INIT_EN_, 0);
2521 		if (ret < 0)
2522 			return ret;
2523 
2524 		return lan78xx_update_reg(dev, USB_CFG1,
2525 					  USB_CFG1_DEV_U1_INIT_EN_,
2526 					  USB_CFG1_DEV_U1_INIT_EN_);
2527 
2528 	case SPEED_100:
2529 	case SPEED_10:
2530 		/* Enable both U1 and U2 */
2531 		mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2532 		val = mask;
2533 		return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2534 
2535 	default:
2536 		netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2537 		return -EINVAL;
2538 	}
2539 }
2540 
2541 /**
2542  * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2543  * @dev: pointer to the LAN78xx device structure
2544  * @tx_pause: enable transmission of pause frames
2545  * @rx_pause: enable reception of pause frames
2546  *
2547  * This function configures the LAN78xx flow control settings by writing
2548  * to the FLOW and FCT_FLOW registers. The pause time is set to the
2549  * maximum allowed value (65535 quanta). FIFO thresholds are selected
2550  * based on USB speed.
2551  *
2552  * The Pause Time field is measured in units of 512-bit times (quanta):
2553  *   - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2554  *   - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2555  *   - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2556  *
2557  * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2558  *   - RXUSED is the number of bytes used in the RX FIFO
2559  *   - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2560  *   - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2561  *   - Both thresholds are encoded in units of 512 bytes (rounded up)
2562  *
2563  * Thresholds differ by USB speed because available USB bandwidth
2564  * affects how fast packets can be drained from the RX FIFO:
2565  *   - USB 3.x (SuperSpeed):
2566  *       FLOW_ON  = 9216 bytes → 18 units
2567  *       FLOW_OFF = 4096 bytes →  8 units
2568  *   - USB 2.0 (High-Speed):
2569  *       FLOW_ON  = 8704 bytes → 17 units
2570  *       FLOW_OFF = 1024 bytes →  2 units
2571  *
2572  * Note: The FCT_FLOW register must be configured before enabling TX pause
2573  *       (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2574  *
2575  * Return: 0 on success or a negative error code on failure.
2576  */
2577 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2578 					 bool tx_pause, bool rx_pause)
2579 {
2580 	/* Use maximum pause time: 65535 quanta (512-bit times) */
2581 	const u32 pause_time_quanta = 65535;
2582 	u32 fct_flow = 0;
2583 	u32 flow = 0;
2584 	int ret;
2585 
2586 	/* Prepare MAC flow control bits */
2587 	if (tx_pause)
2588 		flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2589 
2590 	if (rx_pause)
2591 		flow |= FLOW_CR_RX_FCEN_;
2592 
2593 	/* Select RX FIFO thresholds based on USB speed
2594 	 *
2595 	 * FCT_FLOW layout:
2596 	 *   bits [6:0]   FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2597 	 *   bits [14:8]  FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2598 	 *   thresholds are expressed in units of 512 bytes
2599 	 */
2600 	switch (dev->udev->speed) {
2601 	case USB_SPEED_SUPER:
2602 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2603 		break;
2604 	case USB_SPEED_HIGH:
2605 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2606 		break;
2607 	default:
2608 		netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2609 			    dev->udev->speed);
2610 		return -EINVAL;
2611 	}
2612 
2613 	/* Step 1: Write FIFO thresholds before enabling pause frames */
2614 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2615 	if (ret < 0)
2616 		return ret;
2617 
2618 	/* Step 2: Enable MAC pause functionality */
2619 	return lan78xx_write_reg(dev, FLOW, flow);
2620 }
2621 
2622 /**
2623  * lan78xx_register_fixed_phy() - Register a fallback fixed PHY
2624  * @dev: LAN78xx device
2625  *
2626  * Registers a fixed PHY with 1 Gbps full duplex. This is used in special cases
2627  * like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface to a
2628  * switch without a visible PHY.
2629  *
2630  * Return: pointer to the registered fixed PHY, or ERR_PTR() on error.
2631  */
2632 static struct phy_device *lan78xx_register_fixed_phy(struct lan78xx_net *dev)
2633 {
2634 	struct fixed_phy_status fphy_status = {
2635 		.link = 1,
2636 		.speed = SPEED_1000,
2637 		.duplex = DUPLEX_FULL,
2638 	};
2639 
2640 	netdev_info(dev->net,
2641 		    "No PHY found on LAN7801 – registering fixed PHY (e.g. EVB-KSZ9897-1)\n");
2642 
2643 	return fixed_phy_register(&fphy_status, NULL);
2644 }
2645 
2646 /**
2647  * lan78xx_get_phy() - Probe or register PHY device and set interface mode
2648  * @dev: LAN78xx device structure
2649  *
2650  * This function attempts to find a PHY on the MDIO bus. If no PHY is found
2651  * and the chip is LAN7801, it registers a fixed PHY as fallback. It also
2652  * sets dev->interface based on chip ID and detected PHY type.
2653  *
2654  * Return: a valid PHY device pointer, or ERR_PTR() on failure.
2655  */
2656 static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
2657 {
2658 	struct phy_device *phydev;
2659 
2660 	/* Attempt to locate a PHY on the MDIO bus */
2661 	phydev = phy_find_first(dev->mdiobus);
2662 
2663 	switch (dev->chipid) {
2664 	case ID_REV_CHIP_ID_7801_:
2665 		if (phydev) {
2666 			/* External RGMII PHY detected */
2667 			dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2668 			phydev->is_internal = false;
2669 
2670 			if (!phydev->drv)
2671 				netdev_warn(dev->net,
2672 					    "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
2673 
2674 			return phydev;
2675 		}
2676 
2677 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2678 		/* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */
2679 		return lan78xx_register_fixed_phy(dev);
2680 
2681 	case ID_REV_CHIP_ID_7800_:
2682 	case ID_REV_CHIP_ID_7850_:
2683 		if (!phydev)
2684 			return ERR_PTR(-ENODEV);
2685 
2686 		/* These use internal GMII-connected PHY */
2687 		dev->interface = PHY_INTERFACE_MODE_GMII;
2688 		phydev->is_internal = true;
2689 		return phydev;
2690 
2691 	default:
2692 		netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
2693 		return ERR_PTR(-ENODEV);
2694 	}
2695 }
2696 
2697 /**
2698  * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
2699  * @dev: LAN78xx device
2700  *
2701  * Configure MAC-side registers according to dev->interface, which should be
2702  * set by lan78xx_get_phy().
2703  *
2704  * - For PHY_INTERFACE_MODE_RGMII:
2705  *   Enable MAC-side TXC delay. This mode seems to be used in a special setup
2706  *   without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
2707  *   connected to the KSZ9897 switch, and the link timing is expected to be
2708  *   hardwired (e.g. via strapping or board layout). No devicetree support is
2709  *   assumed here.
2710  *
2711  * - For PHY_INTERFACE_MODE_RGMII_ID:
2712  *   Disable MAC-side delay and rely on the PHY driver to provide delay.
2713  *
2714  * - For GMII, no MAC-specific config is needed.
2715  *
2716  * Return: 0 on success or a negative error code.
2717  */
2718 static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
2719 {
2720 	int ret;
2721 
2722 	switch (dev->interface) {
2723 	case PHY_INTERFACE_MODE_RGMII:
2724 		/* Enable MAC-side TX clock delay */
2725 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2726 					MAC_RGMII_ID_TXC_DELAY_EN_);
2727 		if (ret < 0)
2728 			return ret;
2729 
2730 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2731 		if (ret < 0)
2732 			return ret;
2733 
2734 		ret = lan78xx_update_reg(dev, HW_CFG,
2735 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
2736 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
2737 		if (ret < 0)
2738 			return ret;
2739 
2740 		break;
2741 
2742 	case PHY_INTERFACE_MODE_RGMII_ID:
2743 		/* Disable MAC-side TXC delay, PHY provides it */
2744 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2745 		if (ret < 0)
2746 			return ret;
2747 
2748 		break;
2749 
2750 	case PHY_INTERFACE_MODE_GMII:
2751 		/* No MAC-specific configuration required */
2752 		break;
2753 
2754 	default:
2755 		netdev_warn(dev->net, "Unsupported interface mode: %d\n",
2756 			    dev->interface);
2757 		break;
2758 	}
2759 
2760 	return 0;
2761 }
2762 
2763 /**
2764  * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2765  * @dev: LAN78xx device
2766  * @phydev: PHY device (must be valid)
2767  *
2768  * Reads "microchip,led-modes" property from the PHY's DT node and enables
2769  * the corresponding number of LEDs by writing to HW_CFG.
2770  *
2771  * This helper preserves the original logic, enabling up to 4 LEDs.
2772  * If the property is not present, this function does nothing.
2773  *
2774  * Return: 0 on success or a negative error code.
2775  */
2776 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2777 					  struct phy_device *phydev)
2778 {
2779 	struct device_node *np = phydev->mdio.dev.of_node;
2780 	u32 reg;
2781 	int len, ret;
2782 
2783 	if (!np)
2784 		return 0;
2785 
2786 	len = of_property_count_elems_of_size(np, "microchip,led-modes",
2787 					      sizeof(u32));
2788 	if (len < 0)
2789 		return 0;
2790 
2791 	ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2792 	if (ret < 0)
2793 		return ret;
2794 
2795 	reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2796 		 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2797 
2798 	reg |= (len > 0) * HW_CFG_LED0_EN_ |
2799 	       (len > 1) * HW_CFG_LED1_EN_ |
2800 	       (len > 2) * HW_CFG_LED2_EN_ |
2801 	       (len > 3) * HW_CFG_LED3_EN_;
2802 
2803 	return lan78xx_write_reg(dev, HW_CFG, reg);
2804 }
2805 
/**
 * lan78xx_phy_init() - Find, attach and configure the device's PHY
 * @dev: LAN78xx device
 *
 * Locates (or registers) the PHY via lan78xx_get_phy(), applies the
 * MAC-side interface settings, connects phylib with
 * lan78xx_link_status_change() as the link handler, then sets up flow
 * control and EEE advertisement.
 *
 * On failure, a pseudo fixed-link PHY registered as fallback is
 * unregistered and freed again.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	phydev = lan78xx_get_phy(dev);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	ret = lan78xx_mac_prepare_for_phy(dev);
	if (ret < 0)
		goto free_phy;

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* only LAN7801 may have registered the fixed-PHY fallback
		 * that needs manual teardown here
		 */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
				phy_device_free(phydev);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls: clear any pause bits, then
	 * advertise exactly what fc_request_control asks for
	 */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	phy_support_eee(phydev);

	ret = lan78xx_configure_leds_from_dt(dev, phydev);
	if (ret)
		goto free_phy;

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;

free_phy:
	/* undo the fixed-PHY fallback registration, if any */
	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	return ret;
}
2879 
2880 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2881 {
2882 	bool rxenabled;
2883 	u32 buf;
2884 	int ret;
2885 
2886 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2887 	if (ret < 0)
2888 		return ret;
2889 
2890 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2891 
2892 	if (rxenabled) {
2893 		buf &= ~MAC_RX_RXEN_;
2894 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2895 		if (ret < 0)
2896 			return ret;
2897 	}
2898 
2899 	/* add 4 to size for FCS */
2900 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2901 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2902 
2903 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2904 	if (ret < 0)
2905 		return ret;
2906 
2907 	if (rxenabled) {
2908 		buf |= MAC_RX_RXEN_;
2909 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2910 		if (ret < 0)
2911 			return ret;
2912 	}
2913 
2914 	return 0;
2915 }
2916 
/* Asynchronously unlink every URB queued on @q that is not already being
 * unlinked.  The queue lock is dropped around usb_unlink_urb() and the
 * walk restarted afterwards, since completions may modify the queue in
 * the meantime.  Returns the number of URBs whose unlink was started.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry whose unlink has not started yet */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2961 
2962 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2963 {
2964 	struct lan78xx_net *dev = netdev_priv(netdev);
2965 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2966 	int ret;
2967 
2968 	/* no second zero-length packet read wanted after mtu-sized packets */
2969 	if ((max_frame_len % dev->maxpacket) == 0)
2970 		return -EDOM;
2971 
2972 	ret = usb_autopm_get_interface(dev->intf);
2973 	if (ret < 0)
2974 		return ret;
2975 
2976 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2977 	if (ret < 0)
2978 		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2979 			   new_mtu, netdev->mtu, ERR_PTR(ret));
2980 	else
2981 		WRITE_ONCE(netdev->mtu, new_mtu);
2982 
2983 	usb_autopm_put_interface(dev->intf);
2984 
2985 	return ret;
2986 }
2987 
2988 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2989 {
2990 	struct lan78xx_net *dev = netdev_priv(netdev);
2991 	struct sockaddr *addr = p;
2992 	u32 addr_lo, addr_hi;
2993 	int ret;
2994 
2995 	if (netif_running(netdev))
2996 		return -EBUSY;
2997 
2998 	if (!is_valid_ether_addr(addr->sa_data))
2999 		return -EADDRNOTAVAIL;
3000 
3001 	eth_hw_addr_set(netdev, addr->sa_data);
3002 
3003 	addr_lo = netdev->dev_addr[0] |
3004 		  netdev->dev_addr[1] << 8 |
3005 		  netdev->dev_addr[2] << 16 |
3006 		  netdev->dev_addr[3] << 24;
3007 	addr_hi = netdev->dev_addr[4] |
3008 		  netdev->dev_addr[5] << 8;
3009 
3010 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
3011 	if (ret < 0)
3012 		return ret;
3013 
3014 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
3015 	if (ret < 0)
3016 		return ret;
3017 
3018 	/* Added to support MAC address changes */
3019 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
3020 	if (ret < 0)
3021 		return ret;
3022 
3023 	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
3024 }
3025 
3026 /* Enable or disable Rx checksum offload engine */
3027 static int lan78xx_set_features(struct net_device *netdev,
3028 				netdev_features_t features)
3029 {
3030 	struct lan78xx_net *dev = netdev_priv(netdev);
3031 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3032 	unsigned long flags;
3033 
3034 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3035 
3036 	if (features & NETIF_F_RXCSUM) {
3037 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3038 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3039 	} else {
3040 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3041 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3042 	}
3043 
3044 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
3045 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3046 	else
3047 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3048 
3049 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3050 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3051 	else
3052 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3053 
3054 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3055 
3056 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3057 }
3058 
3059 static void lan78xx_deferred_vlan_write(struct work_struct *param)
3060 {
3061 	struct lan78xx_priv *pdata =
3062 			container_of(param, struct lan78xx_priv, set_vlan);
3063 	struct lan78xx_net *dev = pdata->dev;
3064 
3065 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
3066 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
3067 }
3068 
3069 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
3070 				   __be16 proto, u16 vid)
3071 {
3072 	struct lan78xx_net *dev = netdev_priv(netdev);
3073 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3074 	u16 vid_bit_index;
3075 	u16 vid_dword_index;
3076 
3077 	vid_dword_index = (vid >> 5) & 0x7F;
3078 	vid_bit_index = vid & 0x1F;
3079 
3080 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
3081 
3082 	/* defer register writes to a sleepable context */
3083 	schedule_work(&pdata->set_vlan);
3084 
3085 	return 0;
3086 }
3087 
3088 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
3089 				    __be16 proto, u16 vid)
3090 {
3091 	struct lan78xx_net *dev = netdev_priv(netdev);
3092 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3093 	u16 vid_bit_index;
3094 	u16 vid_dword_index;
3095 
3096 	vid_dword_index = (vid >> 5) & 0x7F;
3097 	vid_bit_index = vid & 0x1F;
3098 
3099 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
3100 
3101 	/* defer register writes to a sleepable context */
3102 	schedule_work(&pdata->set_vlan);
3103 
3104 	return 0;
3105 }
3106 
3107 static int lan78xx_init_ltm(struct lan78xx_net *dev)
3108 {
3109 	u32 regs[6] = { 0 };
3110 	int ret;
3111 	u32 buf;
3112 
3113 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
3114 	if (ret < 0)
3115 		goto init_ltm_failed;
3116 
3117 	if (buf & USB_CFG1_LTM_ENABLE_) {
3118 		u8 temp[2];
3119 		/* Get values from EEPROM first */
3120 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
3121 			if (temp[0] == 24) {
3122 				ret = lan78xx_read_raw_eeprom(dev,
3123 							      temp[1] * 2,
3124 							      24,
3125 							      (u8 *)regs);
3126 				if (ret < 0)
3127 					return ret;
3128 			}
3129 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
3130 			if (temp[0] == 24) {
3131 				ret = lan78xx_read_raw_otp(dev,
3132 							   temp[1] * 2,
3133 							   24,
3134 							   (u8 *)regs);
3135 				if (ret < 0)
3136 					return ret;
3137 			}
3138 		}
3139 	}
3140 
3141 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
3142 	if (ret < 0)
3143 		goto init_ltm_failed;
3144 
3145 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
3146 	if (ret < 0)
3147 		goto init_ltm_failed;
3148 
3149 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
3150 	if (ret < 0)
3151 		goto init_ltm_failed;
3152 
3153 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
3154 	if (ret < 0)
3155 		goto init_ltm_failed;
3156 
3157 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
3158 	if (ret < 0)
3159 		goto init_ltm_failed;
3160 
3161 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
3162 	if (ret < 0)
3163 		goto init_ltm_failed;
3164 
3165 	return 0;
3166 
3167 init_ltm_failed:
3168 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
3169 	return ret;
3170 }
3171 
3172 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3173 {
3174 	int result = 0;
3175 
3176 	switch (dev->udev->speed) {
3177 	case USB_SPEED_SUPER:
3178 		dev->rx_urb_size = RX_SS_URB_SIZE;
3179 		dev->tx_urb_size = TX_SS_URB_SIZE;
3180 		dev->n_rx_urbs = RX_SS_URB_NUM;
3181 		dev->n_tx_urbs = TX_SS_URB_NUM;
3182 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
3183 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3184 		break;
3185 	case USB_SPEED_HIGH:
3186 		dev->rx_urb_size = RX_HS_URB_SIZE;
3187 		dev->tx_urb_size = TX_HS_URB_SIZE;
3188 		dev->n_rx_urbs = RX_HS_URB_NUM;
3189 		dev->n_tx_urbs = TX_HS_URB_NUM;
3190 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
3191 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3192 		break;
3193 	case USB_SPEED_FULL:
3194 		dev->rx_urb_size = RX_FS_URB_SIZE;
3195 		dev->tx_urb_size = TX_FS_URB_SIZE;
3196 		dev->n_rx_urbs = RX_FS_URB_NUM;
3197 		dev->n_tx_urbs = TX_FS_URB_NUM;
3198 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
3199 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3200 		break;
3201 	default:
3202 		netdev_warn(dev->net, "USB bus speed not supported\n");
3203 		result = -EIO;
3204 		break;
3205 	}
3206 
3207 	return result;
3208 }
3209 
/* lan78xx_reset - soft-reset the chip and restore run-time register state
 *
 * Issues a LiteReset (HW_CFG_LRST_) and polls for its completion, then:
 *  - reprograms the MAC address (lost across the reset)
 *  - caches the chip ID and revision from ID_REV
 *  - configures USB behaviour (NAK on IN token, burst cap enable),
 *    LTM, bulk-in delay and the Rx/Tx FIFO end addresses
 *  - clears interrupt status and disables flow control
 *  - restores receive-filter defaults, checksum offload and multicast
 *  - resets the internal PHY and polls until the device reports ready
 *  - programs MAC_CR (per-chip RGMII / auto speed+duplex quirks) and
 *    the maximum Rx frame length for the current MTU
 *
 * Return: 0 on success, a negative errno from register access, or
 * -ETIMEDOUT if the LiteReset or PHY reset does not complete within
 * roughly one second.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* LRST_ self-clears when the reset has completed */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	ret = lan78xx_init_mac_address(dev);
	if (ret < 0)
		return ret;

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	ret = lan78xx_init_ltm(dev);
	if (ret < 0)
		return ret;

	/* write the previously computed burst cap and bulk-in delay */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_MEF_;
	buf |= HW_CFG_CLK125_EN_;
	buf |= HW_CFG_REFCLK25_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes; the end registers take units of 512 bytes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	/* start with flow control disabled */
	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* wait for the reset bit to self-clear AND the ready bit to rise */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_) {
		buf &= ~MAC_CR_GMII_EN_;
		/* Enable Auto Duplex and Auto speed */
		buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	}

	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
	    dev->chipid == ID_REV_CHIP_ID_7850_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3398 
3399 static void lan78xx_init_stats(struct lan78xx_net *dev)
3400 {
3401 	u32 *p;
3402 	int i;
3403 
3404 	/* initialize for stats update
3405 	 * some counters are 20bits and some are 32bits
3406 	 */
3407 	p = (u32 *)&dev->stats.rollover_max;
3408 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3409 		p[i] = 0xFFFFF;
3410 
3411 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3412 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3413 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3414 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3415 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3416 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3417 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3418 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3419 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3420 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3421 
3422 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3423 }
3424 
/* lan78xx_open - ndo_open: bring the interface up
 *
 * Takes an autopm reference for the lifetime of the open interface
 * (released in lan78xx_stop(), or immediately below on failure), starts
 * the PHY, submits the interrupt URB used for link-change notification,
 * flushes and starts both data paths, enables NAPI and defers an
 * EVENT_LINK_RESET to evaluate the link state.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* cleared so the deferred link-reset work re-evaluates the link */
	dev->link_on = false;

	napi_enable(&dev->napi);

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	/* on success the autopm reference is kept until lan78xx_stop() */
	if (ret < 0)
		usb_autopm_put_interface(dev->intf);

	return ret;
}
3485 
/* lan78xx_terminate_urbs - cancel all in-flight Rx/Tx URBs and drain queues
 *
 * Publishes an on-stack waitqueue through dev->wait so URB completion
 * paths can wake this thread, unlinks every URB on txq and rxq, then
 * sleeps in UNLINK_TIMEOUT_MS intervals until both queues are empty.
 * Finally releases any completed-but-unprocessed Rx buffers back to the
 * free pool and purges the Rx overflow and Tx pending queues.
 *
 * Sleeps; must be called from process context (see lan78xx_stop()).
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3521 
/* lan78xx_stop - ndo_stop: bring the interface down
 *
 * Mirror of lan78xx_open(): stops the statistics timer, stack queue and
 * NAPI, kills all in-flight URBs, stops the Tx/Rx data paths and the
 * PHY, neutralises the deferred-work events and finally drops the
 * autopm reference taken in lan78xx_open().
 *
 * Return: always 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		timer_delete_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3570 
/* defer_bh - move a completed SKB from its active queue to rxq_done
 *
 * Transfers @skb from @list to the rxq_done queue and records the new
 * lifecycle @state in its control block.  Interrupts stay disabled
 * across both locks: @list->lock is taken with irqsave and dropped
 * without restoring, and the saved flags are only restored when
 * rxq_done.lock is released - a lock hand-over, not nested locking.
 *
 * NAPI is scheduled only on the empty -> non-empty transition of
 * rxq_done, so completions arriving while the poller is already armed
 * do not re-schedule it.  Called from URB completion context.
 *
 * Return: the state the SKB had before the transfer.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3594 
3595 static void tx_complete(struct urb *urb)
3596 {
3597 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3598 	struct skb_data *entry = (struct skb_data *)skb->cb;
3599 	struct lan78xx_net *dev = entry->dev;
3600 
3601 	if (urb->status == 0) {
3602 		dev->net->stats.tx_packets += entry->num_of_packet;
3603 		dev->net->stats.tx_bytes += entry->length;
3604 	} else {
3605 		dev->net->stats.tx_errors += entry->num_of_packet;
3606 
3607 		switch (urb->status) {
3608 		case -EPIPE:
3609 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3610 			break;
3611 
3612 		/* software-driven interface shutdown */
3613 		case -ECONNRESET:
3614 		case -ESHUTDOWN:
3615 			netif_dbg(dev, tx_err, dev->net,
3616 				  "tx err interface gone %d\n",
3617 				  entry->urb->status);
3618 			break;
3619 
3620 		case -EPROTO:
3621 		case -ETIME:
3622 		case -EILSEQ:
3623 			netif_stop_queue(dev->net);
3624 			netif_dbg(dev, tx_err, dev->net,
3625 				  "tx err queue stopped %d\n",
3626 				  entry->urb->status);
3627 			break;
3628 		default:
3629 			netif_dbg(dev, tx_err, dev->net,
3630 				  "unknown tx err %d\n",
3631 				  entry->urb->status);
3632 			break;
3633 		}
3634 	}
3635 
3636 	usb_autopm_put_interface_async(dev->intf);
3637 
3638 	skb_unlink(skb, &dev->txq);
3639 
3640 	lan78xx_release_tx_buf(dev, skb);
3641 
3642 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3643 	 */
3644 	if (skb_queue_empty(&dev->txq) &&
3645 	    !skb_queue_empty(&dev->txq_pend))
3646 		napi_schedule(&dev->napi);
3647 }
3648 
3649 static void lan78xx_queue_skb(struct sk_buff_head *list,
3650 			      struct sk_buff *newsk, enum skb_state state)
3651 {
3652 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3653 
3654 	__skb_queue_tail(list, newsk);
3655 	entry->state = state;
3656 }
3657 
3658 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3659 {
3660 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3661 }
3662 
/* Total number of payload bytes currently queued on txq_pend.
 *
 * NOTE(review): read without txq_pend.lock, so the value may be a stale
 * snapshot; callers appear to tolerate that - confirm if exactness is
 * ever required.
 */
static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
{
	return dev->tx_pend_data_len;
}
3667 
3668 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3669 				    struct sk_buff *skb,
3670 				    unsigned int *tx_pend_data_len)
3671 {
3672 	unsigned long flags;
3673 
3674 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3675 
3676 	__skb_queue_tail(&dev->txq_pend, skb);
3677 
3678 	dev->tx_pend_data_len += skb->len;
3679 	*tx_pend_data_len = dev->tx_pend_data_len;
3680 
3681 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3682 }
3683 
3684 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3685 					 struct sk_buff *skb,
3686 					 unsigned int *tx_pend_data_len)
3687 {
3688 	unsigned long flags;
3689 
3690 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3691 
3692 	__skb_queue_head(&dev->txq_pend, skb);
3693 
3694 	dev->tx_pend_data_len += skb->len;
3695 	*tx_pend_data_len = dev->tx_pend_data_len;
3696 
3697 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3698 }
3699 
3700 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3701 				    struct sk_buff **skb,
3702 				    unsigned int *tx_pend_data_len)
3703 {
3704 	unsigned long flags;
3705 
3706 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3707 
3708 	*skb = __skb_dequeue(&dev->txq_pend);
3709 	if (*skb)
3710 		dev->tx_pend_data_len -= (*skb)->len;
3711 	*tx_pend_data_len = dev->tx_pend_data_len;
3712 
3713 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3714 }
3715 
/* lan78xx_start_xmit - ndo_start_xmit: queue an SKB for transmission
 *
 * SKBs are never sent from here; they are appended to txq_pend and the
 * URB construction happens later in NAPI context (lan78xx_tx_bh()).
 * Always returns NETDEV_TX_OK - ownership of @skb passes to the driver.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* device asleep: kick the deferred work so it can resume it */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3751 
3752 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3753 {
3754 	struct lan78xx_priv *pdata = NULL;
3755 	int ret;
3756 	int i;
3757 
3758 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3759 
3760 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3761 	if (!pdata) {
3762 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3763 		return -ENOMEM;
3764 	}
3765 
3766 	pdata->dev = dev;
3767 
3768 	spin_lock_init(&pdata->rfe_ctl_lock);
3769 	mutex_init(&pdata->dataport_mutex);
3770 
3771 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3772 
3773 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3774 		pdata->vlan_table[i] = 0;
3775 
3776 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3777 
3778 	dev->net->features = 0;
3779 
3780 	if (DEFAULT_TX_CSUM_ENABLE)
3781 		dev->net->features |= NETIF_F_HW_CSUM;
3782 
3783 	if (DEFAULT_RX_CSUM_ENABLE)
3784 		dev->net->features |= NETIF_F_RXCSUM;
3785 
3786 	if (DEFAULT_TSO_CSUM_ENABLE)
3787 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3788 
3789 	if (DEFAULT_VLAN_RX_OFFLOAD)
3790 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3791 
3792 	if (DEFAULT_VLAN_FILTER_ENABLE)
3793 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3794 
3795 	dev->net->hw_features = dev->net->features;
3796 
3797 	ret = lan78xx_setup_irq_domain(dev);
3798 	if (ret < 0) {
3799 		netdev_warn(dev->net,
3800 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3801 		goto out1;
3802 	}
3803 
3804 	/* Init all registers */
3805 	ret = lan78xx_reset(dev);
3806 	if (ret) {
3807 		netdev_warn(dev->net, "Registers INIT FAILED....");
3808 		goto out2;
3809 	}
3810 
3811 	ret = lan78xx_mdio_init(dev);
3812 	if (ret) {
3813 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3814 		goto out2;
3815 	}
3816 
3817 	dev->net->flags |= IFF_MULTICAST;
3818 
3819 	pdata->wol = WAKE_MAGIC;
3820 
3821 	return ret;
3822 
3823 out2:
3824 	lan78xx_remove_irq_domain(dev);
3825 
3826 out1:
3827 	netdev_warn(dev->net, "Bind routine FAILED");
3828 	cancel_work_sync(&pdata->set_multicast);
3829 	cancel_work_sync(&pdata->set_vlan);
3830 	kfree(pdata);
3831 	return ret;
3832 }
3833 
3834 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3835 {
3836 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3837 
3838 	lan78xx_remove_irq_domain(dev);
3839 
3840 	lan78xx_remove_mdio(dev);
3841 
3842 	if (pdata) {
3843 		cancel_work_sync(&pdata->set_multicast);
3844 		cancel_work_sync(&pdata->set_vlan);
3845 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3846 		kfree(pdata);
3847 		pdata = NULL;
3848 		dev->data[0] = 0;
3849 	}
3850 }
3851 
3852 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3853 				    struct sk_buff *skb,
3854 				    u32 rx_cmd_a, u32 rx_cmd_b)
3855 {
3856 	/* HW Checksum offload appears to be flawed if used when not stripping
3857 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3858 	 */
3859 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3860 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3861 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3862 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3863 		skb->ip_summed = CHECKSUM_NONE;
3864 	} else {
3865 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3866 		skb->ip_summed = CHECKSUM_COMPLETE;
3867 	}
3868 }
3869 
3870 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3871 				    struct sk_buff *skb,
3872 				    u32 rx_cmd_a, u32 rx_cmd_b)
3873 {
3874 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3875 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3876 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3877 				       (rx_cmd_b & 0xffff));
3878 }
3879 
/* lan78xx_skb_return - deliver one received frame to the network stack
 *
 * Rx statistics are updated BEFORE eth_type_trans() pulls the Ethernet
 * header, so rx_bytes counts the full frame (the debug print below adds
 * the header length back for the same reason).  The control block is
 * cleared so stale skb_data state never leaks into the stack.  Delivery
 * goes through GRO unless a deferred Rx hardware timestamp consumes the
 * SKB first.  Runs in NAPI context.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}
3896 
/* lan78xx_rx - unpack all Ethernet frames from a completed Rx URB buffer
 *
 * Each frame in the URB buffer is preceded by three little-endian
 * command words (RX_CMD_A/B/C) and padded so the next frame starts on a
 * 4-byte boundary.  Every good frame is copied (minus its trailing FCS)
 * into a fresh NAPI SKB and delivered; once @budget is exhausted the
 * remaining frames are parked on rxq_overflow instead, because parsing
 * of a URB buffer cannot stop part-way through.
 *
 * Return: 1 if the buffer was processed, 0 on a malformed buffer (the
 * caller accounts it as an Rx error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* a frame longer than the remaining buffer means the
		 * buffer is corrupt - abandon it
		 */
		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			/* the hardware-appended FCS is not delivered */
			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3982 
3983 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3984 			      int budget, int *work_done)
3985 {
3986 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3987 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3988 		dev->net->stats.rx_errors++;
3989 	}
3990 }
3991 
/* rx_complete - Rx URB completion handler (interrupt context)
 *
 * Classifies the completion status, updates error statistics, and hands
 * the buffer to defer_bh() for the NAPI poller: state rx_done when the
 * payload is usable, rx_cleanup otherwise.  An -EPIPE stall additionally
 * schedules EVENT_RX_HALT recovery, and low-level USB errors or unlinks
 * simply recycle the buffer.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	skb_put(skb, urb->actual_length);
	state = rx_done;

	/* sanity check: the URB context and the recorded URB should match */
	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		/* runt transfers are dropped, not parsed */
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}
4051 
/* rx_submit - (re)submit one Rx URB for the given buffer
 *
 * Fills a bulk-in URB spanning the whole rx_urb_size buffer and submits
 * it while holding rxq.lock, so a successful submission and the queueing
 * of the buffer on dev->rxq happen in the same critical section.
 * Submission is refused (-ENOLINK) when the device is detached, not
 * running, Rx-halted or asleep.
 *
 * On any failure the buffer is returned to the free pool; -EPIPE defers
 * endpoint-stall recovery, -ENODEV/-ENOENT detach the netdev, and other
 * errors just re-arm NAPI to retry later.
 *
 * Return: 0 on successful submission, otherwise a negative error code.
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* failed submissions hand the buffer back to the free pool */
	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
4103 
4104 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4105 {
4106 	struct sk_buff *rx_buf;
4107 
4108 	/* Ensure the maximum number of Rx URBs is submitted
4109 	 */
4110 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4111 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4112 			break;
4113 	}
4114 }
4115 
/* lan78xx_rx_urb_resubmit - recycle a processed Rx buffer
 *
 * Resets the SKB's data/tail/length bookkeeping back to an empty buffer
 * and hands it to rx_submit().  The return value is deliberately
 * ignored: on failure rx_submit() already returns the buffer to the
 * free pool via lan78xx_release_rx_buf().
 */
static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
				    struct sk_buff *rx_buf)
{
	/* reset SKB data pointers */

	rx_buf->data = rx_buf->head;
	skb_reset_tail_pointer(rx_buf);
	rx_buf->len = 0;
	rx_buf->data_len = 0;

	rx_submit(dev, rx_buf, GFP_ATOMIC);
}
4128 
/* lan78xx_fill_tx_cmd_words - build the two Tx command words for @skb
 *
 * Writes TX_CMD_A and TX_CMD_B little-endian (possibly unaligned) into
 * @buffer.  TX_CMD_A carries the frame length, an FCS-insertion request,
 * checksum-offload flags for CHECKSUM_PARTIAL SKBs and the LSO flag;
 * TX_CMD_B carries the TCP MSS for GSO frames (clamped up to the
 * hardware minimum) and the VLAN tag to insert, when present.
 */
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
{
	u32 tx_cmd_a;
	u32 tx_cmd_b;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* never program an MSS below the hardware minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	put_unaligned_le32(tx_cmd_a, buffer);
	put_unaligned_le32(tx_cmd_b, buffer + 4);
}
4156 
4157 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
4158 					    struct sk_buff *tx_buf)
4159 {
4160 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
4161 	int remain = dev->tx_urb_size;
4162 	u8 *tx_data = tx_buf->data;
4163 	u32 urb_len = 0;
4164 
4165 	entry->num_of_packet = 0;
4166 	entry->length = 0;
4167 
4168 	/* Work through the pending SKBs and copy the data of each SKB into
4169 	 * the URB buffer if there room for all the SKB data.
4170 	 *
4171 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
4172 	 */
4173 	while (remain >= TX_SKB_MIN_LEN) {
4174 		unsigned int pending_bytes;
4175 		unsigned int align_bytes;
4176 		struct sk_buff *skb;
4177 		unsigned int len;
4178 
4179 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
4180 
4181 		if (!skb)
4182 			break;
4183 
4184 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
4185 			      TX_ALIGNMENT;
4186 		len = align_bytes + TX_CMD_LEN + skb->len;
4187 		if (len > remain) {
4188 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
4189 			break;
4190 		}
4191 
4192 		tx_data += align_bytes;
4193 
4194 		lan78xx_fill_tx_cmd_words(skb, tx_data);
4195 		tx_data += TX_CMD_LEN;
4196 
4197 		len = skb->len;
4198 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
4199 			struct net_device_stats *stats = &dev->net->stats;
4200 
4201 			stats->tx_dropped++;
4202 			dev_kfree_skb_any(skb);
4203 			tx_data -= TX_CMD_LEN;
4204 			continue;
4205 		}
4206 
4207 		tx_data += len;
4208 		entry->length += len;
4209 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
4210 
4211 		dev_kfree_skb_any(skb);
4212 
4213 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
4214 
4215 		remain = dev->tx_urb_size - urb_len;
4216 	}
4217 
4218 	skb_put(tx_buf, urb_len);
4219 
4220 	return entry;
4221 }
4222 
/* lan78xx_tx_bh - build and submit Tx URBs from the pending queue
 *
 * First restarts the stack queue if enough URB space has become free.
 * Then repeatedly takes a free URB buffer, packs as many pending SKBs
 * into it as fit (lan78xx_tx_buf_fill()) and submits it, holding one
 * async autopm reference per in-flight URB.  Stops when the pending
 * queue or the free-buffer pool runs empty, on a submission error, or
 * when the device is asleep (the filled URB is then parked on the
 * deferred anchor until resume).
 *
 * Note the "out" label sits inside the error branch at the bottom of
 * the loop: the autopm failure path jumps there so both failure modes
 * share the same statistics/buffer-release cleanup.
 *
 * Runs in NAPI context.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
4315 
/* lan78xx_bh - NAPI bottom half: deliver Rx frames and drive Tx
 *
 * Order of operations:
 *  1. flush frames parked on rxq_overflow by the previous cycle
 *  2. splice rxq_done onto a private list and process each completed Rx
 *     URB within @budget, resubmitting every URB afterwards
 *  3. push any unprocessed URBs back to the FRONT of rxq_done so they
 *     are first in line next cycle
 *  4. if the device is still present and running: shorten the stats
 *     timer delta, top up the pool of in-flight Rx URBs (unless Rx is
 *     halted) and build/submit new Tx URBs
 *
 * Return: number of frames delivered to the stack this cycle.
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4391 
/* NAPI poll callback: skip all work while the device is asleep,
 * otherwise run the bottom half and, when the budget was not fully
 * consumed, complete NAPI and decide whether to schedule another
 * polling cycle.
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	/* result == budget means NAPI stays scheduled */
	return result;
}
4435 
4436 static void lan78xx_delayedwork(struct work_struct *work)
4437 {
4438 	int status;
4439 	struct lan78xx_net *dev;
4440 
4441 	dev = container_of(work, struct lan78xx_net, wq.work);
4442 
4443 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4444 		return;
4445 
4446 	if (usb_autopm_get_interface(dev->intf) < 0)
4447 		return;
4448 
4449 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4450 		unlink_urbs(dev, &dev->txq);
4451 
4452 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4453 		if (status < 0 &&
4454 		    status != -EPIPE &&
4455 		    status != -ESHUTDOWN) {
4456 			if (netif_msg_tx_err(dev))
4457 				netdev_err(dev->net,
4458 					   "can't clear tx halt, status %d\n",
4459 					   status);
4460 		} else {
4461 			clear_bit(EVENT_TX_HALT, &dev->flags);
4462 			if (status != -ESHUTDOWN)
4463 				netif_wake_queue(dev->net);
4464 		}
4465 	}
4466 
4467 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4468 		unlink_urbs(dev, &dev->rxq);
4469 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4470 		if (status < 0 &&
4471 		    status != -EPIPE &&
4472 		    status != -ESHUTDOWN) {
4473 			if (netif_msg_rx_err(dev))
4474 				netdev_err(dev->net,
4475 					   "can't clear rx halt, status %d\n",
4476 					   status);
4477 		} else {
4478 			clear_bit(EVENT_RX_HALT, &dev->flags);
4479 			napi_schedule(&dev->napi);
4480 		}
4481 	}
4482 
4483 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4484 		int ret = 0;
4485 
4486 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4487 		if (lan78xx_link_reset(dev) < 0) {
4488 			netdev_info(dev->net, "link reset failed (%d)\n",
4489 				    ret);
4490 		}
4491 	}
4492 
4493 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4494 		lan78xx_update_stats(dev);
4495 
4496 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4497 
4498 		mod_timer(&dev->stat_monitor,
4499 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4500 
4501 		dev->delta = min((dev->delta * 2), 50);
4502 	}
4503 
4504 	usb_autopm_put_interface(dev->intf);
4505 }
4506 
/* Completion handler for the interrupt-in (status) URB: dispatch the
 * device status on success, then resubmit the URB unless the device
 * has gone away or the interface is down.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* clear stale status data before resubmitting */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4558 
/* USB disconnect callback: unwind everything lan78xx_probe() set up.
 * Ordering matters — NAPI and the netdev are taken down before the
 * PHY, deferred work, URBs and DMA buffer pools.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	netif_napi_del(&dev->napi);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	timer_shutdown_sync(&dev->stat_monitor);
	/* keep lan78xx_delayedwork() from touching the hardware from
	 * here on (it checks this bit and bails out)
	 */
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	phydev = net->phydev;

	phy_disconnect(net->phydev);

	/* a pseudo fixed-link PHY must be unregistered and freed
	 * explicitly after disconnecting
	 */
	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4604 
/* ndo_tx_timeout callback: unlink all in-flight Tx URBs and kick NAPI
 * so the bottom half can restart transmission.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}
4612 
4613 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4614 						struct net_device *netdev,
4615 						netdev_features_t features)
4616 {
4617 	struct lan78xx_net *dev = netdev_priv(netdev);
4618 
4619 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4620 		features &= ~NETIF_F_GSO_MASK;
4621 
4622 	features = vlan_features_check(skb, features);
4623 	features = vxlan_features_check(skb, features);
4624 
4625 	return features;
4626 }
4627 
/* net_device callbacks implemented by this driver */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4643 
/* Statistics timer callback: raise EVENT_STAT_UPDATE, which
 * lan78xx_delayedwork() services by calling lan78xx_update_stats().
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4650 
/* USB probe callback: allocate the netdev and driver state, validate
 * the expected endpoint layout (bulk-in, bulk-out, interrupt-in),
 * bind to the hardware, set up the interrupt URB, initialize the PHY
 * and register the network device. On failure, unwinds via the goto
 * ladder at the bottom in reverse order of setup.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->mdiobus_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* Expect at least three endpoints: bulk-in, bulk-out and
	 * interrupt-in; reject anything else.
	 */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out5;
	}

	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_urbs;
	}

	/* URB_FREE_BUFFER hands ownership of @buf to the URB, so the
	 * error paths below only need to free the URB itself.
	 */
	usb_fill_int_urb(dev->urb_intr, dev->udev,
			 dev->pipe_intr, buf, maxp,
			 intr_complete, dev, period);
	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto free_urbs;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto free_urbs;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out8;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): failure here is silently ignored (the function
	 * still returns 0) — confirm this is intentional.
	 */
	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out8:
	phy_disconnect(netdev->phydev);
free_urbs:
	usb_free_urb(dev->urb_intr);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4825 
/* Compute the CRC-16 (polynomial 0x8005, initial value 0xFFFF,
 * data consumed LSB-first) used by the wakeup-frame filters.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 crc = 0xFFFF;
	int byte;

	for (byte = 0; byte < len; byte++) {
		u8 data = buf[byte];
		int bit;

		for (bit = 0; bit < 8; bit++) {
			/* feedback is the outgoing MSB xor'd with the
			 * incoming data bit, sampled before the shift
			 */
			u16 feedback = (crc >> 15) ^ (data & 1);

			crc <<= 1;
			if (feedback)
				crc = (crc ^ poly) | 0x0001;
			data >>= 1;
		}
	}

	return crc;
}
4850 
/* Prepare the device for USB autosuspend: quiesce both data paths,
 * reset the wakeup registers, arm wake-on-good-frame in WUCSR with
 * suspend mode 3 in PMT_CTL, clear WUPS status, then restart the Rx
 * path. Returns 0 on success or a negative error code.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* clear WUPS (wakeup status) bits */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4918 
/* Program the wakeup filters and suspend mode from the WoL bitmap
 * @wol (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP), clear wakeup status,
 * then restart the Rx path. Each wakeup-frame filter slot is indexed
 * by mask_index; MCAST consumes two slots (IPv4 + IPv6 multicast),
 * ARP one. Returns 0 on success or a negative error code.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters before reprogramming */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}
5115 
/* USB suspend callback (system and runtime/auto suspend).
 *
 * With the interface open: refuse autosuspend while Tx work is
 * pending, stop both data paths, tear down in-flight URBs, then
 * program either autosuspend wakeup (runtime PM) or the user's WoL
 * settings. With the interface closed: disable all wakeup sources
 * and enter suspend mode 3.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* ASLEEP makes lan78xx_poll() bail out early */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		timer_delete(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		/* clear WUPS (wakeup status) bits */
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5225 
5226 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
5227 {
5228 	bool pipe_halted = false;
5229 	struct urb *urb;
5230 
5231 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5232 		struct sk_buff *skb = urb->context;
5233 		int ret;
5234 
5235 		if (!netif_device_present(dev->net) ||
5236 		    !netif_carrier_ok(dev->net) ||
5237 		    pipe_halted) {
5238 			lan78xx_release_tx_buf(dev, skb);
5239 			continue;
5240 		}
5241 
5242 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5243 
5244 		if (ret == 0) {
5245 			netif_trans_update(dev->net);
5246 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5247 		} else {
5248 			if (ret == -EPIPE) {
5249 				netif_stop_queue(dev->net);
5250 				pipe_halted = true;
5251 			} else if (ret == -ENODEV) {
5252 				netif_device_detach(dev->net);
5253 			}
5254 
5255 			lan78xx_release_tx_buf(dev, skb);
5256 		}
5257 	}
5258 
5259 	return pipe_halted;
5260 }
5261 
/* USB resume callback: with the interface open, restart the Tx path
 * (resubmitting URBs deferred during suspend), re-arm the interrupt
 * URB and statistics timer, and kick NAPI; in all cases clear the
 * EVENT_DEV_ASLEEP flag and reset the wakeup registers.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		napi_schedule(&dev->napi);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	/* NOTE(review): the "_RCD_"/"_FR_"/"_MPR_" bit names suggest
	 * these writes clear latched wakeup-event status — confirm
	 * against the LAN78xx datasheet.
	 */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5358 
5359 static int lan78xx_reset_resume(struct usb_interface *intf)
5360 {
5361 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5362 	int ret;
5363 
5364 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5365 
5366 	ret = lan78xx_reset(dev);
5367 	if (ret < 0)
5368 		return ret;
5369 
5370 	phy_start(dev->net->phydev);
5371 
5372 	ret = lan78xx_resume(intf);
5373 
5374 	return ret;
5375 }
5376 
/* USB vendor/product IDs of the devices handled by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
5397 
/* USB driver glue: probe/disconnect and power-management callbacks */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
5409 
/* Register with the USB core and declare module metadata */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
5415