xref: /linux/drivers/net/usb/lan78xx.c (revision cea465a96a294e7bc2537f27a737cfa7c6234b3d)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 
50 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52 					 (FLOW_THRESHOLD(off) << 8))
53 
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS			9216
56 #define FLOW_ON_HS			8704
57 
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS			4096
60 #define FLOW_OFF_HS			1024
61 
62 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY		(0x0800)
64 #define MAX_SINGLE_PACKET_SIZE		(9000)
65 #define DEFAULT_TX_CSUM_ENABLE		(true)
66 #define DEFAULT_RX_CSUM_ENABLE		(true)
67 #define DEFAULT_TSO_CSUM_ENABLE		(true)
68 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
69 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
70 #define TX_ALIGNMENT			(4)
71 #define RXW_PADDING			2
72 
73 #define LAN78XX_USB_VENDOR_ID		(0x0424)
74 #define LAN7800_USB_PRODUCT_ID		(0x7800)
75 #define LAN7850_USB_PRODUCT_ID		(0x7850)
76 #define LAN7801_USB_PRODUCT_ID		(0x7801)
77 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
78 #define LAN78XX_OTP_MAGIC		(0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
81 
82 #define	MII_READ			1
83 #define	MII_WRITE			0
84 
85 #define EEPROM_INDICATOR		(0xA5)
86 #define EEPROM_MAC_OFFSET		(0x01)
87 #define MAX_EEPROM_SIZE			512
88 #define OTP_INDICATOR_1			(0xF3)
89 #define OTP_INDICATOR_2			(0xF7)
90 
91 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
92 					 WAKE_MCAST | WAKE_BCAST | \
93 					 WAKE_ARP | WAKE_MAGIC)
94 
95 #define TX_URB_NUM			10
96 #define TX_SS_URB_NUM			TX_URB_NUM
97 #define TX_HS_URB_NUM			TX_URB_NUM
98 #define TX_FS_URB_NUM			TX_URB_NUM
99 
100 /* A single URB buffer must be large enough to hold a complete jumbo packet
101  */
102 #define TX_SS_URB_SIZE			(32 * 1024)
103 #define TX_HS_URB_SIZE			(16 * 1024)
104 #define TX_FS_URB_SIZE			(10 * 1024)
105 
106 #define RX_SS_URB_NUM			30
107 #define RX_HS_URB_NUM			10
108 #define RX_FS_URB_NUM			10
109 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
110 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
111 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
112 
113 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
114 #define SS_BULK_IN_DELAY		0x2000
115 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
116 #define HS_BULK_IN_DELAY		0x2000
117 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
118 #define FS_BULK_IN_DELAY		0x2000
119 
120 #define TX_CMD_LEN			8
121 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
122 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123 
124 #define RX_CMD_LEN			10
125 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
126 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
127 
128 /* USB related defines */
129 #define BULK_IN_PIPE			1
130 #define BULK_OUT_PIPE			2
131 
132 /* default autosuspend delay (mSec)*/
133 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
134 
135 /* statistic update interval (mSec) */
136 #define STAT_UPDATE_TIMER		(1 * 1000)
137 
138 /* time to wait for MAC or FCT to stop (jiffies) */
139 #define HW_DISABLE_TIMEOUT		(HZ / 10)
140 
141 /* time to wait between polling MAC or FCT state (ms) */
142 #define HW_DISABLE_DELAY_MS		1
143 
144 /* defines interrupts from interrupt EP */
145 #define MAX_INT_EP			(32)
146 #define INT_EP_INTEP			(31)
147 #define INT_EP_OTP_WR_DONE		(28)
148 #define INT_EP_EEE_TX_LPI_START		(26)
149 #define INT_EP_EEE_TX_LPI_STOP		(25)
150 #define INT_EP_EEE_RX_LPI		(24)
151 #define INT_EP_MAC_RESET_TIMEOUT	(23)
152 #define INT_EP_RDFO			(22)
153 #define INT_EP_TXE			(21)
154 #define INT_EP_USB_STATUS		(20)
155 #define INT_EP_TX_DIS			(19)
156 #define INT_EP_RX_DIS			(18)
157 #define INT_EP_PHY			(17)
158 #define INT_EP_DP			(16)
159 #define INT_EP_MAC_ERR			(15)
160 #define INT_EP_TDFU			(14)
161 #define INT_EP_TDFO			(13)
162 #define INT_EP_UTX			(12)
163 #define INT_EP_GPIO_11			(11)
164 #define INT_EP_GPIO_10			(10)
165 #define INT_EP_GPIO_9			(9)
166 #define INT_EP_GPIO_8			(8)
167 #define INT_EP_GPIO_7			(7)
168 #define INT_EP_GPIO_6			(6)
169 #define INT_EP_GPIO_5			(5)
170 #define INT_EP_GPIO_4			(4)
171 #define INT_EP_GPIO_3			(3)
172 #define INT_EP_GPIO_2			(2)
173 #define INT_EP_GPIO_1			(1)
174 #define INT_EP_GPIO_0			(0)
175 
/* ethtool statistics names, in the exact order of the counters in
 * struct lan78xx_statstage / lan78xx_statstage64 — the stats code copies
 * those structs as flat arrays, so this table's order is load-bearing.
 *
 * These strings are userspace-visible ABI (presumably reported via
 * ethtool get_strings — reporting code not in this chunk); do NOT
 * "fix" the inconsistent spellings ("Rx Fragment Errors",
 * "Bytes Frames", "Excessive Collision") — scripts may match on them.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
225 
/* Raw 32-bit hardware statistics block, as returned by the
 * USB_VENDOR_REQUEST_GET_STATS control transfer (little-endian on the
 * wire, converted in lan78xx_read_stats()).
 *
 * Field order MUST match lan78xx_gstrings[] and lan78xx_statstage64:
 * lan78xx_read_stats() and lan78xx_update_stats() iterate these structs
 * as flat (u32 *)/(u64 *) arrays.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
275 
/* 64-bit accumulated statistics (hardware reading plus rollover
 * compensation, computed in lan78xx_update_stats()).
 *
 * Field order MUST mirror struct lan78xx_statstage exactly — the update
 * code indexes both structs as parallel flat arrays.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
325 
/* Register offsets exposed as a set, presumably for the ethtool
 * register dump (NOTE(review): the consumer is not in this chunk —
 * confirm against the get_regs implementation before relying on order).
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
347 
348 #define PHY_REG_SIZE (32 * sizeof(u32))
349 
350 struct lan78xx_net;
351 
/* Receive-filtering state: shadow copies of the RFE control register and
 * the hardware filter tables, plus the work items that push multicast and
 * VLAN table updates to the device.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;	/* backpointer to owning device */
	u32 rfe_ctl;			/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN membership table shadow */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast table update */
	struct work_struct set_vlan;	  /* deferred VLAN table update */
	u32 wol;			  /* configured Wake-on-LAN flags */
};
364 
/* Lifecycle states for an skb/URB pair, kept in skb_data.state. */
enum skb_state {
	illegal = 0,	/* not initialized / invalid */
	tx_start,	/* Tx URB submitted */
	tx_done,	/* Tx completion received */
	rx_start,	/* Rx URB submitted */
	rx_done,	/* Rx completion received */
	rx_cleanup,	/* Rx buffer being torn down */
	unlink_start	/* URB unlink in progress */
};
374 
/* Per-buffer bookkeeping overlaid on skb->cb (must fit in 48 bytes). */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB paired with this skb for its lifetime */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;		/* payload length for this transfer */
	int num_of_packet;	/* packets aggregated in this URB */
};
382 
383 #define EVENT_TX_HALT			0
384 #define EVENT_RX_HALT			1
385 #define EVENT_RX_MEMORY			2
386 #define EVENT_STS_SPLIT			3
387 #define EVENT_LINK_RESET		4
388 #define EVENT_RX_PAUSED			5
389 #define EVENT_DEV_WAKING		6
390 #define EVENT_DEV_ASLEEP		7
391 #define EVENT_DEV_OPEN			8
392 #define EVENT_STAT_UPDATE		9
393 #define EVENT_DEV_DISCONNECT		10
394 
/* Statistics bookkeeping: the last raw hardware reading (saved), how
 * many times each 32-bit counter wrapped (rollover_count), each
 * counter's wrap limit (rollover_max), and the reconstructed 64-bit
 * totals (curr_stat). See lan78xx_update_stats().
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
402 
/* IRQ-domain state for demultiplexing device interrupts (NOTE(review):
 * presumably the PHY interrupt delivered over the interrupt endpoint —
 * the handler code is not in this chunk; confirm there).
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* mapped virtual IRQ number */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* cached interrupt enable mask */
	struct mutex		irq_lock;		/* for irq bus access */
};
411 
/* Main per-device driver state. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* struct lan78xx_priv — see lan78xx_priv */

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;	/* depth of txq_free pool */
	size_t			n_rx_urbs;	/* depth of rxq_free pool */
	size_t			tx_urb_size;	/* buffer size per Tx URB */
	size_t			rx_urb_size;	/* buffer size per Rx URB */

	/* buffer pools and in-flight/pending queues (see lan78xx_get_buf /
	 * lan78xx_release_buf helpers below)
	 */
	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;	/* netif_msg_* log level mask */

	struct urb		*urb_intr;	/* interrupt endpoint URB */
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		mdiobus_mutex; /* for MDIO bus access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* ID_REV chip id (7800/7850/7801) */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;	/* flow control from autoneg? */
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;		/* see struct statstage */

	struct irq_domain_data	domain_data;
};
474 
475 /* use ethtool to change the level for any given device */
476 static int msg_level = -1;
477 module_param(msg_level, int, 0);
478 MODULE_PARM_DESC(msg_level, "Override default message level");
479 
/* Take a pre-allocated skb/URB buffer from @buf_pool.
 *
 * Returns the buffer, or NULL when the pool is empty.
 */
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	/* skb_dequeue() already returns NULL for an empty queue, checked
	 * under the queue lock, so a separate skb_queue_empty() pre-check
	 * is redundant (and racy without the lock).
	 */
	return skb_dequeue(buf_pool);
}
487 
488 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
489 				struct sk_buff *buf)
490 {
491 	buf->data = buf->head;
492 	skb_reset_tail_pointer(buf);
493 
494 	buf->len = 0;
495 	buf->data_len = 0;
496 
497 	skb_queue_tail(buf_pool, buf);
498 }
499 
500 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
501 {
502 	struct skb_data *entry;
503 	struct sk_buff *buf;
504 
505 	while (!skb_queue_empty(buf_pool)) {
506 		buf = skb_dequeue(buf_pool);
507 		if (buf) {
508 			entry = (struct skb_data *)buf->cb;
509 			usb_free_urb(entry->urb);
510 			dev_kfree_skb_any(buf);
511 		}
512 	}
513 }
514 
/* Pre-allocate @n_urbs skb+URB pairs, each with an @urb_size byte
 * buffer, and park them on @buf_pool.
 *
 * Each skb's control block is initialized as a struct skb_data carrying
 * the paired URB and a backpointer to @dev.
 *
 * Returns 0 on success or -ENOMEM on any allocation failure; a
 * partially built pool is torn down before returning.
 */
static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		/* NOTE(review): GFP_ATOMIC — presumably so this is safe
		 * from atomic context; confirm against callers.
		 */
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		/* NOTE(review): a freshly allocated skb is already linear,
		 * so this looks like a defensive no-op — confirm.
		 */
		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	/* free whatever was queued so far (URBs included) */
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}
558 
/* Take a free Rx URB buffer from the pool (NULL if exhausted). */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}
563 
/* Return an Rx URB buffer to the free pool, reset to empty. */
static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}
569 
/* Free all Rx pool buffers and their URBs. */
static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}
574 
/* Allocate the Rx buffer pool (n_rx_urbs buffers of rx_urb_size bytes).
 * Returns 0 or -ENOMEM.
 */
static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
580 
/* Take a free Tx URB buffer from the pool (NULL if exhausted). */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}
585 
/* Return a Tx URB buffer to the free pool, reset to empty. */
static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}
591 
/* Free all Tx pool buffers and their URBs. */
static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}
596 
/* Allocate the Tx buffer pool (n_tx_urbs buffers of tx_urb_size bytes).
 * Returns 0 or -ENOMEM.
 */
static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
602 
/* Read the 32-bit device register at @index over the USB control
 * endpoint into *@data (converted to host byte order).
 *
 * Returns 0 on success, -ENODEV if the device is gone, -ENOMEM, or a
 * negative USB error. *data is untouched on failure.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	/* bail early if the device has been disconnected */
	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	/* NOTE(review): heap bounce buffer — presumably because
	 * usb_control_msg() requires DMA-able memory; confirm.
	 */
	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* device registers are little-endian on the wire */
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	/* positive values are transfer byte counts; normalize to 0 */
	return ret < 0 ? ret : 0;
}
632 
633 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
634 {
635 	u32 *buf;
636 	int ret;
637 
638 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
639 		return -ENODEV;
640 
641 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
642 	if (!buf)
643 		return -ENOMEM;
644 
645 	*buf = data;
646 	cpu_to_le32s(buf);
647 
648 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
649 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
650 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
651 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
652 	if (unlikely(ret < 0) &&
653 	    net_ratelimit()) {
654 		netdev_warn(dev->net,
655 			    "Failed to write register index 0x%08x. ret = %pe",
656 			    index, ERR_PTR(ret));
657 	}
658 
659 	kfree(buf);
660 
661 	return ret < 0 ? ret : 0;
662 }
663 
664 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
665 			      u32 data)
666 {
667 	int ret;
668 	u32 buf;
669 
670 	ret = lan78xx_read_reg(dev, reg, &buf);
671 	if (ret < 0)
672 		return ret;
673 
674 	buf &= ~mask;
675 	buf |= (mask & data);
676 
677 	return lan78xx_write_reg(dev, reg, buf);
678 }
679 
680 static int lan78xx_read_stats(struct lan78xx_net *dev,
681 			      struct lan78xx_statstage *data)
682 {
683 	int ret = 0;
684 	int i;
685 	struct lan78xx_statstage *stats;
686 	u32 *src;
687 	u32 *dst;
688 
689 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
690 	if (!stats)
691 		return -ENOMEM;
692 
693 	ret = usb_control_msg(dev->udev,
694 			      usb_rcvctrlpipe(dev->udev, 0),
695 			      USB_VENDOR_REQUEST_GET_STATS,
696 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
697 			      0,
698 			      0,
699 			      (void *)stats,
700 			      sizeof(*stats),
701 			      USB_CTRL_SET_TIMEOUT);
702 	if (likely(ret >= 0)) {
703 		src = (u32 *)stats;
704 		dst = (u32 *)data;
705 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
706 			le32_to_cpus(&src[i]);
707 			dst[i] = src[i];
708 		}
709 	} else {
710 		netdev_warn(dev->net,
711 			    "Failed to read stat ret = %d", ret);
712 	}
713 
714 	kfree(stats);
715 
716 	return ret;
717 }
718 
/* If the freshly read counter is smaller than the last saved reading,
 * the 32-bit hardware counter wrapped since the previous poll: bump its
 * per-counter rollover count.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
724 
/* Run the rollover check for every hardware counter, then save the new
 * readings in dev->stats.saved as the reference for the next poll.
 * Caller holds dev->stats.access_lock (see lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* new readings become the reference for the next rollover check */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
778 
/* Refresh dev->stats.curr_stat from hardware.
 *
 * The statstage structs are iterated as flat arrays (u32 for the raw
 * reading / rollover bookkeeping, u64 for the accumulated totals), so
 * lan78xx_statstage and lan78xx_statstage64 must keep identical field
 * order. Each 64-bit total is reconstructed as:
 *   reading + rollover_count * (rollover_max + 1)
 * Silently skipped if the interface cannot be resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* > 0: lan78xx_read_stats() returns the transfer byte count */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
806 
/* Start a hardware block by setting its @hw_enable bit(s) in @reg.
 * Returns 0 or a negative error.
 */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
811 
812 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
813 			   u32 hw_disabled)
814 {
815 	unsigned long timeout;
816 	bool stopped = true;
817 	int ret;
818 	u32 buf;
819 
820 	/* Stop the h/w block (if not already stopped) */
821 
822 	ret = lan78xx_read_reg(dev, reg, &buf);
823 	if (ret < 0)
824 		return ret;
825 
826 	if (buf & hw_enabled) {
827 		buf &= ~hw_enabled;
828 
829 		ret = lan78xx_write_reg(dev, reg, buf);
830 		if (ret < 0)
831 			return ret;
832 
833 		stopped = false;
834 		timeout = jiffies + HW_DISABLE_TIMEOUT;
835 		do  {
836 			ret = lan78xx_read_reg(dev, reg, &buf);
837 			if (ret < 0)
838 				return ret;
839 
840 			if (buf & hw_disabled)
841 				stopped = true;
842 			else
843 				msleep(HW_DISABLE_DELAY_MS);
844 		} while (!stopped && !time_after(jiffies, timeout));
845 	}
846 
847 	return stopped ? 0 : -ETIMEDOUT;
848 }
849 
/* Flush a FIFO by pulsing its @fifo_flush (reset) bit(s) in @reg. */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
854 
855 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
856 {
857 	int ret;
858 
859 	netif_dbg(dev, drv, dev->net, "start tx path");
860 
861 	/* Start the MAC transmitter */
862 
863 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
864 	if (ret < 0)
865 		return ret;
866 
867 	/* Start the Tx FIFO */
868 
869 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
870 	if (ret < 0)
871 		return ret;
872 
873 	return 0;
874 }
875 
876 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
877 {
878 	int ret;
879 
880 	netif_dbg(dev, drv, dev->net, "stop tx path");
881 
882 	/* Stop the Tx FIFO */
883 
884 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
885 	if (ret < 0)
886 		return ret;
887 
888 	/* Stop the MAC transmitter */
889 
890 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
891 	if (ret < 0)
892 		return ret;
893 
894 	return 0;
895 }
896 
/* Reset (flush) the Tx FIFO contents.
 *
 * The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
904 
905 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
906 {
907 	int ret;
908 
909 	netif_dbg(dev, drv, dev->net, "start rx path");
910 
911 	/* Start the Rx FIFO */
912 
913 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
914 	if (ret < 0)
915 		return ret;
916 
917 	/* Start the MAC receiver*/
918 
919 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
920 	if (ret < 0)
921 		return ret;
922 
923 	return 0;
924 }
925 
926 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
927 {
928 	int ret;
929 
930 	netif_dbg(dev, drv, dev->net, "stop rx path");
931 
932 	/* Stop the MAC receiver */
933 
934 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
935 	if (ret < 0)
936 		return ret;
937 
938 	/* Stop the Rx FIFO */
939 
940 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
941 	if (ret < 0)
942 		return ret;
943 
944 	return 0;
945 }
946 
/* Reset (flush) the Rx FIFO contents.
 *
 * The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
954 
955 /* Loop until the read is completed with timeout called with mdiobus_mutex held */
956 static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
957 {
958 	unsigned long start_time = jiffies;
959 	u32 val;
960 	int ret;
961 
962 	do {
963 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
964 		if (ret < 0)
965 			return ret;
966 
967 		if (!(val & MII_ACC_MII_BUSY_))
968 			return 0;
969 	} while (!time_after(jiffies, start_time + HZ));
970 
971 	return -ETIMEDOUT;
972 }
973 
974 static inline u32 mii_access(int id, int index, int read)
975 {
976 	u32 ret;
977 
978 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
979 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
980 	if (read)
981 		ret |= MII_ACC_MII_READ_;
982 	else
983 		ret |= MII_ACC_MII_WRITE_;
984 	ret |= MII_ACC_MII_BUSY_;
985 
986 	return ret;
987 }
988 
989 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
990 {
991 	unsigned long start_time = jiffies;
992 	u32 val;
993 	int ret;
994 
995 	do {
996 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
997 		if (ret < 0)
998 			return ret;
999 
1000 		if (!(val & E2P_CMD_EPC_BUSY_) ||
1001 		    (val & E2P_CMD_EPC_TIMEOUT_))
1002 			break;
1003 		usleep_range(40, 100);
1004 	} while (!time_after(jiffies, start_time + HZ));
1005 
1006 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
1007 		netdev_warn(dev->net, "EEPROM read operation timeout");
1008 		return -ETIMEDOUT;
1009 	}
1010 
1011 	return 0;
1012 }
1013 
1014 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
1015 {
1016 	unsigned long start_time = jiffies;
1017 	u32 val;
1018 	int ret;
1019 
1020 	do {
1021 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
1022 		if (ret < 0)
1023 			return ret;
1024 
1025 		if (!(val & E2P_CMD_EPC_BUSY_))
1026 			return 0;
1027 
1028 		usleep_range(40, 100);
1029 	} while (!time_after(jiffies, start_time + HZ));
1030 
1031 	netdev_warn(dev->net, "EEPROM is busy");
1032 	return -ETIMEDOUT;
1033 }
1034 
/* Read @length bytes from the EEPROM starting at @offset into @data,
 * one byte per E2P_CMD read command.
 *
 * On LAN7800 the EEPROM pins are muxed with the LED function, so LEDs
 * are disabled in HW_CFG for the duration and restored afterwards —
 * including on the controller-timeout paths (via read_raw_eeprom_done).
 * On a USB transfer failure the function returns immediately WITHOUT
 * restoring HW_CFG: the device is unreachable anyway (see the
 * "nothing to do" comments).
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* issue a one-byte read command at the current offset */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		/* only the low byte of E2P_DATA is valid data */
		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	/* restore the saved HW_CFG (re-enables LEDs on LAN7800) */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}
1092 
1093 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
1094 			       u32 length, u8 *data)
1095 {
1096 	int ret;
1097 	u8 sig;
1098 
1099 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
1100 	if (ret < 0)
1101 		return ret;
1102 
1103 	if (sig != EEPROM_INDICATOR)
1104 		return -ENODATA;
1105 
1106 	return lan78xx_read_raw_eeprom(dev, offset, length, data);
1107 }
1108 
/* lan78xx_write_raw_eeprom - write @length bytes from @data to the EEPROM
 * at @offset, one E2P_DATA/E2P_CMD transaction per byte after issuing a
 * write/erase-enable command.
 *
 * Returns 0 on success (or after a recoverable EEPROM timeout) or a
 * negative error code on USB/register access failure.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	/* restore the LED pin muxing that was disabled above (LAN7800 only) */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}
1184 
/* lan78xx_read_raw_otp - read @length bytes from OTP memory at @offset.
 *
 * Powers up the OTP block if it is in power-down, then reads one byte at
 * a time via OTP_ADDR1/OTP_ADDR2 + OTP_FUNC_CMD/OTP_CMD_GO, polling
 * OTP_STATUS for completion (up to ~1s per step).
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split across two address registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1259 
/* lan78xx_write_raw_otp - program @length bytes of @data into OTP memory
 * at @offset using BYTE program mode.
 *
 * Powers up the OTP block if needed, then programs one byte per
 * OTP_PRGM_DATA/OTP_CMD_GO cycle, polling OTP_STATUS for completion.
 * NOTE(review): each byte is issued with OTP_TST_CMD_PRGVRFY_
 * (program-and-verify) — confirm against the LAN78xx datasheet.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* byte address is split across two address registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1338 
1339 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1340 			    u32 length, u8 *data)
1341 {
1342 	u8 sig;
1343 	int ret;
1344 
1345 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1346 
1347 	if (ret == 0) {
1348 		if (sig == OTP_INDICATOR_2)
1349 			offset += 0x100;
1350 		else if (sig != OTP_INDICATOR_1)
1351 			ret = -EINVAL;
1352 		if (!ret)
1353 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1354 	}
1355 
1356 	return ret;
1357 }
1358 
1359 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1360 {
1361 	int i, ret;
1362 
1363 	for (i = 0; i < 100; i++) {
1364 		u32 dp_sel;
1365 
1366 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1367 		if (unlikely(ret < 0))
1368 			return ret;
1369 
1370 		if (dp_sel & DP_SEL_DPRDY_)
1371 			return 0;
1372 
1373 		usleep_range(40, 100);
1374 	}
1375 
1376 	netdev_warn(dev->net, "%s timed out", __func__);
1377 
1378 	return -ETIMEDOUT;
1379 }
1380 
/* lan78xx_dataport_write - write @length words from @buf into internal RAM
 * (selected by @ram_select) starting at @addr.
 *
 * Takes a USB autopm reference and serializes against other dataport users
 * via pdata->dataport_mutex. One DP_ADDR/DP_DATA/DP_CMD cycle is issued
 * per word, waiting for the dataport to go idle in between.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

	/* common exit: warn on failure, then drop lock and autopm reference */
dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1428 
1429 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1430 				    int index, u8 addr[ETH_ALEN])
1431 {
1432 	u32 temp;
1433 
1434 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1435 		temp = addr[3];
1436 		temp = addr[2] | (temp << 8);
1437 		temp = addr[1] | (temp << 8);
1438 		temp = addr[0] | (temp << 8);
1439 		pdata->pfilter_table[index][1] = temp;
1440 		temp = addr[5];
1441 		temp = addr[4] | (temp << 8);
1442 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1443 		pdata->pfilter_table[index][0] = temp;
1444 	}
1445 }
1446 
1447 /* returns hash bit number for given MAC address */
1448 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1449 {
1450 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1451 }
1452 
1453 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1454 {
1455 	struct lan78xx_priv *pdata =
1456 			container_of(param, struct lan78xx_priv, set_multicast);
1457 	struct lan78xx_net *dev = pdata->dev;
1458 	int i, ret;
1459 
1460 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1461 		  pdata->rfe_ctl);
1462 
1463 	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
1464 				     DP_SEL_VHF_VLAN_LEN,
1465 				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1466 	if (ret < 0)
1467 		goto multicast_write_done;
1468 
1469 	for (i = 1; i < NUM_OF_MAF; i++) {
1470 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1471 		if (ret < 0)
1472 			goto multicast_write_done;
1473 
1474 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1475 					pdata->pfilter_table[i][1]);
1476 		if (ret < 0)
1477 			goto multicast_write_done;
1478 
1479 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1480 					pdata->pfilter_table[i][0]);
1481 		if (ret < 0)
1482 			goto multicast_write_done;
1483 	}
1484 
1485 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1486 
1487 multicast_write_done:
1488 	if (ret < 0)
1489 		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
1490 	return;
1491 }
1492 
1493 static void lan78xx_set_multicast(struct net_device *netdev)
1494 {
1495 	struct lan78xx_net *dev = netdev_priv(netdev);
1496 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1497 	unsigned long flags;
1498 	int i;
1499 
1500 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1501 
1502 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1503 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1504 
1505 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1506 		pdata->mchash_table[i] = 0;
1507 
1508 	/* pfilter_table[0] has own HW address */
1509 	for (i = 1; i < NUM_OF_MAF; i++) {
1510 		pdata->pfilter_table[i][0] = 0;
1511 		pdata->pfilter_table[i][1] = 0;
1512 	}
1513 
1514 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1515 
1516 	if (dev->net->flags & IFF_PROMISC) {
1517 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1518 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1519 	} else {
1520 		if (dev->net->flags & IFF_ALLMULTI) {
1521 			netif_dbg(dev, drv, dev->net,
1522 				  "receive all multicast enabled");
1523 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1524 		}
1525 	}
1526 
1527 	if (netdev_mc_count(dev->net)) {
1528 		struct netdev_hw_addr *ha;
1529 		int i;
1530 
1531 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1532 
1533 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1534 
1535 		i = 1;
1536 		netdev_for_each_mc_addr(ha, netdev) {
1537 			/* set first 32 into Perfect Filter */
1538 			if (i < 33) {
1539 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1540 			} else {
1541 				u32 bitnum = lan78xx_hash(ha->addr);
1542 
1543 				pdata->mchash_table[bitnum / 32] |=
1544 							(1 << (bitnum % 32));
1545 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1546 			}
1547 			i++;
1548 		}
1549 	}
1550 
1551 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1552 
1553 	/* defer register writes to a sleepable context */
1554 	schedule_work(&pdata->set_multicast);
1555 }
1556 
1557 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
1558 					 bool tx_pause, bool rx_pause);
1559 
1560 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1561 				      u16 lcladv, u16 rmtadv)
1562 {
1563 	u8 cap;
1564 
1565 	if (dev->fc_autoneg)
1566 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1567 	else
1568 		cap = dev->fc_request_control;
1569 
1570 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1571 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1572 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1573 
1574 	return lan78xx_configure_flowcontrol(dev,
1575 					     cap & FLOW_CTRL_TX,
1576 					     cap & FLOW_CTRL_RX);
1577 }
1578 
1579 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1580 
/* lan78xx_mac_reset - reset the MAC and wait for completion.
 *
 * Holds mdiobus_mutex for the entire sequence and waits for any MDIO
 * transaction to finish first, then sets MAC_CR_RST_ and polls until the
 * hardware clears it (up to 1 second).
 *
 * Returns 0 on success, -ETIMEDOUT if the reset bit never clears, or a
 * negative error from register access.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}
1626 
1627 /**
1628  * lan78xx_phy_int_ack - Acknowledge PHY interrupt
1629  * @dev: pointer to the LAN78xx device structure
1630  *
1631  * This function acknowledges the PHY interrupt by setting the
1632  * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
1633  *
1634  * Return: 0 on success or a negative error code on failure.
1635  */
1636 static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
1637 {
1638 	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1639 }
1640 
1641 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed);
1642 
/* lan78xx_link_reset - react to a PHY link-state change.
 *
 * Acknowledges the PHY interrupt, samples the link state under the PHY
 * lock, and then either tears down (MAC reset, stop stat timer) on a
 * link-down transition, or reconfigures USB/flow-control and restarts RX
 * on a link-up transition.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_phy_int_ack(dev);
	if (unlikely(ret < 0))
		return ret;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		/* link went down */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		timer_delete(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* adapt USB-side settings to the negotiated speed */
		ret = lan78xx_configure_usb(dev, ecmd.base.speed);
		if (ret < 0)
			return ret;

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* resume RX traffic and kick NAPI to process it */
		lan78xx_rx_urb_submit_all(dev);

		local_bh_disable();
		napi_schedule(&dev->napi);
		local_bh_enable();
	}

	return 0;
}
1709 
/* Defer work that cannot run in atomic context to keventd.
 *
 * NOTE: annoying asymmetry: schedule_work() fails if the work is already
 * queued, while tasklet_schedule() does not; hope the failure is rare.
 * The flag bit is set regardless, so a queued handler will still see it.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1721 
1722 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1723 {
1724 	u32 intdata;
1725 
1726 	if (urb->actual_length != 4) {
1727 		netdev_warn(dev->net,
1728 			    "unexpected urb length %d", urb->actual_length);
1729 		return;
1730 	}
1731 
1732 	intdata = get_unaligned_le32(urb->transfer_buffer);
1733 
1734 	if (intdata & INT_ENP_PHY_INT) {
1735 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1736 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1737 
1738 		if (dev->domain_data.phyirq > 0)
1739 			generic_handle_irq_safe(dev->domain_data.phyirq);
1740 	} else {
1741 		netdev_warn(dev->net,
1742 			    "unexpected interrupt: 0x%08x\n", intdata);
1743 	}
1744 }
1745 
/* ethtool: report the size of the accessible EEPROM space. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1750 
1751 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1752 				      struct ethtool_eeprom *ee, u8 *data)
1753 {
1754 	struct lan78xx_net *dev = netdev_priv(netdev);
1755 	int ret;
1756 
1757 	ret = usb_autopm_get_interface(dev->intf);
1758 	if (ret)
1759 		return ret;
1760 
1761 	ee->magic = LAN78XX_EEPROM_MAGIC;
1762 
1763 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1764 
1765 	usb_autopm_put_interface(dev->intf);
1766 
1767 	return ret;
1768 }
1769 
1770 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1771 				      struct ethtool_eeprom *ee, u8 *data)
1772 {
1773 	struct lan78xx_net *dev = netdev_priv(netdev);
1774 	int ret;
1775 
1776 	ret = usb_autopm_get_interface(dev->intf);
1777 	if (ret)
1778 		return ret;
1779 
1780 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1781 	 * to load data from EEPROM
1782 	 */
1783 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1784 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1785 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1786 		 (ee->offset == 0) &&
1787 		 (ee->len == 512) &&
1788 		 (data[0] == OTP_INDICATOR_1))
1789 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1790 
1791 	usb_autopm_put_interface(dev->intf);
1792 
1793 	return ret;
1794 }
1795 
1796 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1797 				u8 *data)
1798 {
1799 	if (stringset == ETH_SS_STATS)
1800 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1801 }
1802 
1803 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1804 {
1805 	if (sset == ETH_SS_STATS)
1806 		return ARRAY_SIZE(lan78xx_gstrings);
1807 	else
1808 		return -EOPNOTSUPP;
1809 }
1810 
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy a
 * consistent snapshot out under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1822 
1823 static void lan78xx_get_wol(struct net_device *netdev,
1824 			    struct ethtool_wolinfo *wol)
1825 {
1826 	struct lan78xx_net *dev = netdev_priv(netdev);
1827 	int ret;
1828 	u32 buf;
1829 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1830 
1831 	if (usb_autopm_get_interface(dev->intf) < 0)
1832 		return;
1833 
1834 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1835 	if (unlikely(ret < 0)) {
1836 		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
1837 		wol->supported = 0;
1838 		wol->wolopts = 0;
1839 	} else {
1840 		if (buf & USB_CFG_RMT_WKP_) {
1841 			wol->supported = WAKE_ALL;
1842 			wol->wolopts = pdata->wol;
1843 		} else {
1844 			wol->supported = 0;
1845 			wol->wolopts = 0;
1846 		}
1847 	}
1848 
1849 	usb_autopm_put_interface(dev->intf);
1850 }
1851 
/* ethtool set_wol: record the requested WoL options and propagate them
 * to the USB wakeup machinery and the PHY.
 *
 * NOTE(review): pdata->wol is updated before device_set_wakeup_enable()
 * or phy_ethtool_set_wol() can fail, so the cached value may disagree
 * with the hardware on error — confirm whether that is intended.
 */
static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1879 
1880 static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
1881 {
1882 	struct lan78xx_net *dev = netdev_priv(net);
1883 	struct phy_device *phydev = net->phydev;
1884 	int ret;
1885 	u32 buf;
1886 
1887 	ret = usb_autopm_get_interface(dev->intf);
1888 	if (ret < 0)
1889 		return ret;
1890 
1891 	ret = phy_ethtool_get_eee(phydev, edata);
1892 	if (ret < 0)
1893 		goto exit;
1894 
1895 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1896 	if (buf & MAC_CR_EEE_EN_) {
1897 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1898 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1899 		edata->tx_lpi_timer = buf;
1900 	} else {
1901 		edata->tx_lpi_timer = 0;
1902 	}
1903 
1904 	ret = 0;
1905 exit:
1906 	usb_autopm_put_interface(dev->intf);
1907 
1908 	return ret;
1909 }
1910 
1911 static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
1912 {
1913 	struct lan78xx_net *dev = netdev_priv(net);
1914 	int ret;
1915 	u32 buf;
1916 
1917 	ret = usb_autopm_get_interface(dev->intf);
1918 	if (ret < 0)
1919 		return ret;
1920 
1921 	ret = phy_ethtool_set_eee(net->phydev, edata);
1922 	if (ret < 0)
1923 		goto out;
1924 
1925 	buf = (u32)edata->tx_lpi_timer;
1926 	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1927 out:
1928 	usb_autopm_put_interface(dev->intf);
1929 
1930 	return ret;
1931 }
1932 
1933 static u32 lan78xx_get_link(struct net_device *net)
1934 {
1935 	u32 link;
1936 
1937 	mutex_lock(&net->phydev->lock);
1938 	phy_read_status(net->phydev);
1939 	link = net->phydev->link;
1940 	mutex_unlock(&net->phydev->lock);
1941 
1942 	return link;
1943 }
1944 
/* ethtool get_drvinfo: report driver name and USB topology path. */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1953 
/* ethtool get_msglevel: current netif message-enable bitmap. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1960 
/* ethtool set_msglevel: update the netif message-enable bitmap. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1967 
1968 static int lan78xx_get_link_ksettings(struct net_device *net,
1969 				      struct ethtool_link_ksettings *cmd)
1970 {
1971 	struct lan78xx_net *dev = netdev_priv(net);
1972 	struct phy_device *phydev = net->phydev;
1973 	int ret;
1974 
1975 	ret = usb_autopm_get_interface(dev->intf);
1976 	if (ret < 0)
1977 		return ret;
1978 
1979 	phy_ethtool_ksettings_get(phydev, cmd);
1980 
1981 	usb_autopm_put_interface(dev->intf);
1982 
1983 	return ret;
1984 }
1985 
1986 static int lan78xx_set_link_ksettings(struct net_device *net,
1987 				      const struct ethtool_link_ksettings *cmd)
1988 {
1989 	struct lan78xx_net *dev = netdev_priv(net);
1990 	struct phy_device *phydev = net->phydev;
1991 	int ret = 0;
1992 	int temp;
1993 
1994 	ret = usb_autopm_get_interface(dev->intf);
1995 	if (ret < 0)
1996 		return ret;
1997 
1998 	/* change speed & duplex */
1999 	ret = phy_ethtool_ksettings_set(phydev, cmd);
2000 
2001 	if (!cmd->base.autoneg) {
2002 		/* force link down */
2003 		temp = phy_read(phydev, MII_BMCR);
2004 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
2005 		mdelay(1);
2006 		phy_write(phydev, MII_BMCR, temp);
2007 	}
2008 
2009 	usb_autopm_put_interface(dev->intf);
2010 
2011 	return ret;
2012 }
2013 
2014 static void lan78xx_get_pause(struct net_device *net,
2015 			      struct ethtool_pauseparam *pause)
2016 {
2017 	struct lan78xx_net *dev = netdev_priv(net);
2018 	struct phy_device *phydev = net->phydev;
2019 	struct ethtool_link_ksettings ecmd;
2020 
2021 	phy_ethtool_ksettings_get(phydev, &ecmd);
2022 
2023 	pause->autoneg = dev->fc_autoneg;
2024 
2025 	if (dev->fc_request_control & FLOW_CTRL_TX)
2026 		pause->tx_pause = 1;
2027 
2028 	if (dev->fc_request_control & FLOW_CTRL_RX)
2029 		pause->rx_pause = 1;
2030 }
2031 
/* ethtool set_pauseparam: record the requested flow-control settings and,
 * when autonegotiation is active, rebuild the advertised Pause/Asym_Pause
 * link modes and push them to the PHY.
 *
 * Returns 0 on success or -EINVAL when pause autoneg is requested while
 * link autonegotiation is disabled.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg makes no sense without link autoneg */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* drop the old pause bits, then OR in the new ones derived
		 * from the requested flow-control mode
		 */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
2076 
/* ethtool get_regs_len: size of the register dump produced by
 * lan78xx_get_regs().
 */
static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}
2081 
2082 static void
2083 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
2084 		 void *buf)
2085 {
2086 	struct lan78xx_net *dev = netdev_priv(netdev);
2087 	unsigned int data_count = 0;
2088 	u32 *data = buf;
2089 	int i, ret;
2090 
2091 	/* Read Device/MAC registers */
2092 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
2093 		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
2094 		if (ret < 0) {
2095 			netdev_warn(dev->net,
2096 				    "failed to read register 0x%08x\n",
2097 				    lan78xx_regs[i]);
2098 			goto clean_data;
2099 		}
2100 
2101 		data_count++;
2102 	}
2103 
2104 	return;
2105 
2106 clean_data:
2107 	memset(data, 0, data_count * sizeof(u32));
2108 }
2109 
/* ethtool operations exposed by the driver; PHY-related ops delegate to
 * phylib helpers where possible.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
2134 
/* lan78xx_init_mac_address - determine and program the MAC address.
 *
 * Uses the address already latched in RX_ADDRL/RX_ADDRH if valid;
 * otherwise tries, in order: platform/Device Tree, EEPROM, OTP, and
 * finally a random address. The chosen address is written back to the
 * RX address registers, mirrored into perfect-filter slot 0 (MAF 0), and
 * published to the net_device.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];
	int ret;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
	if (ret < 0)
		return ret;

	/* registers hold the address little-endian: low word bytes 0-3 */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		if (ret < 0)
			return ret;
	}

	/* mirror the address into perfect-filter slot 0 */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(dev->net, addr);

	return 0;
}
2201 
/* MDIO read and write wrappers for phylib */

/* lan78xx_mdiobus_read - phylib read hook: read register @idx of PHY
 * @phy_id via the MII_ACC/MII_DATA registers.
 *
 * Takes a USB autopm reference and serializes access with
 * dev->mdiobus_mutex, waiting for the MII interface to go idle before
 * and after the transaction.
 *
 * Returns the 16-bit register value or a negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);
	if (ret < 0)
		goto done;

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
2242 
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	/* resume the USB interface; registers are unreachable while suspended */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* stage the data word before triggering the MII cycle */
	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	/* wait for the MII write cycle to complete */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
2281 
/* Allocate and register the device's MDIO bus with phylib.  The PHY
 * address mask is narrowed per chip so probing only scans addresses
 * that can hold a PHY on that chip.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* bus id derived from USB topology so it is unique per device */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	/* register via the optional "mdio" devicetree child node, if any */
	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
2328 
/* Tear down the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2334 
2335 static void lan78xx_link_status_change(struct net_device *net)
2336 {
2337 	struct lan78xx_net *dev = netdev_priv(net);
2338 	struct phy_device *phydev = net->phydev;
2339 	u32 data;
2340 	int ret;
2341 
2342 	ret = lan78xx_read_reg(dev, MAC_CR, &data);
2343 	if (ret < 0)
2344 		return;
2345 
2346 	if (phydev->enable_tx_lpi)
2347 		data |=  MAC_CR_EEE_EN_;
2348 	else
2349 		data &= ~MAC_CR_EEE_EN_;
2350 	lan78xx_write_reg(dev, MAC_CR, data);
2351 
2352 	phy_print_status(phydev);
2353 }
2354 
/* irq_domain map callback: attach our irqchip and handler to each
 * virtual IRQ created inside the domain.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
2366 
/* irq_domain unmap callback: undo what irq_map() set up */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2372 
/* Domain ops for the chip's internal interrupt controller */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2377 
/* Only updates the cached enable mask; the hardware register is written
 * later from lan78xx_irq_bus_sync_unlock(), which may sleep.
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
2384 
/* Only updates the cached enable mask; synced to hardware in
 * lan78xx_irq_bus_sync_unlock().
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
2391 
/* Begin an irqchip update transaction; paired with
 * lan78xx_irq_bus_sync_unlock(), which flushes the cached state.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2398 
/* Flush the cached IRQ enable mask to INT_EP_CTL and release the lock
 * taken in lan78xx_irq_bus_lock().
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		goto irq_bus_sync_unlock;

	/* only touch the hardware when the mask actually changed */
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

irq_bus_sync_unlock:
	if (ret < 0)
		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
			   ERR_PTR(ret));

	mutex_unlock(&data->irq_lock);
}
2424 
/* irqchip whose mask/unmask only cache state; the hardware is updated
 * in the bus_sync_unlock callback (sleepable context).
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2432 
2433 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2434 {
2435 	struct irq_domain *irqdomain;
2436 	unsigned int irqmap = 0;
2437 	u32 buf;
2438 	int ret = 0;
2439 
2440 	mutex_init(&dev->domain_data.irq_lock);
2441 
2442 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2443 	if (ret < 0)
2444 		return ret;
2445 
2446 	dev->domain_data.irqenable = buf;
2447 
2448 	dev->domain_data.irqchip = &lan78xx_irqchip;
2449 	dev->domain_data.irq_handler = handle_simple_irq;
2450 
2451 	irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
2452 					     &chip_domain_ops, &dev->domain_data);
2453 	if (irqdomain) {
2454 		/* create mapping for PHY interrupt */
2455 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2456 		if (!irqmap) {
2457 			irq_domain_remove(irqdomain);
2458 
2459 			irqdomain = NULL;
2460 			ret = -EINVAL;
2461 		}
2462 	} else {
2463 		ret = -EINVAL;
2464 	}
2465 
2466 	dev->domain_data.irqdomain = irqdomain;
2467 	dev->domain_data.phyirq = irqmap;
2468 
2469 	return ret;
2470 }
2471 
2472 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2473 {
2474 	if (dev->domain_data.phyirq > 0) {
2475 		irq_dispose_mapping(dev->domain_data.phyirq);
2476 
2477 		if (dev->domain_data.irqdomain)
2478 			irq_domain_remove(dev->domain_data.irqdomain);
2479 	}
2480 	dev->domain_data.phyirq = 0;
2481 	dev->domain_data.irqdomain = NULL;
2482 }
2483 
2484 /**
2485  * lan78xx_configure_usb - Configure USB link power settings
2486  * @dev: pointer to the LAN78xx device structure
2487  * @speed: negotiated Ethernet link speed (in Mbps)
2488  *
2489  * This function configures U1/U2 link power management for SuperSpeed
2490  * USB devices based on the current Ethernet link speed. It uses the
2491  * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2492  *
2493  * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2494  *       LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2495  *
2496  * Return: 0 on success or a negative error code on failure.
2497  */
2498 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2499 {
2500 	u32 mask, val;
2501 	int ret;
2502 
2503 	/* Only configure USB settings for SuperSpeed devices */
2504 	if (dev->udev->speed != USB_SPEED_SUPER)
2505 		return 0;
2506 
2507 	/* LAN7850 does not support USB 3.x */
2508 	if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2509 		netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2510 		return 0;
2511 	}
2512 
2513 	switch (speed) {
2514 	case SPEED_1000:
2515 		/* Disable U2, enable U1 */
2516 		ret = lan78xx_update_reg(dev, USB_CFG1,
2517 					 USB_CFG1_DEV_U2_INIT_EN_, 0);
2518 		if (ret < 0)
2519 			return ret;
2520 
2521 		return lan78xx_update_reg(dev, USB_CFG1,
2522 					  USB_CFG1_DEV_U1_INIT_EN_,
2523 					  USB_CFG1_DEV_U1_INIT_EN_);
2524 
2525 	case SPEED_100:
2526 	case SPEED_10:
2527 		/* Enable both U1 and U2 */
2528 		mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2529 		val = mask;
2530 		return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2531 
2532 	default:
2533 		netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2534 		return -EINVAL;
2535 	}
2536 }
2537 
2538 /**
2539  * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2540  * @dev: pointer to the LAN78xx device structure
2541  * @tx_pause: enable transmission of pause frames
2542  * @rx_pause: enable reception of pause frames
2543  *
2544  * This function configures the LAN78xx flow control settings by writing
2545  * to the FLOW and FCT_FLOW registers. The pause time is set to the
2546  * maximum allowed value (65535 quanta). FIFO thresholds are selected
2547  * based on USB speed.
2548  *
2549  * The Pause Time field is measured in units of 512-bit times (quanta):
2550  *   - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2551  *   - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2552  *   - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2553  *
2554  * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2555  *   - RXUSED is the number of bytes used in the RX FIFO
2556  *   - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2557  *   - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2558  *   - Both thresholds are encoded in units of 512 bytes (rounded up)
2559  *
2560  * Thresholds differ by USB speed because available USB bandwidth
2561  * affects how fast packets can be drained from the RX FIFO:
2562  *   - USB 3.x (SuperSpeed):
2563  *       FLOW_ON  = 9216 bytes → 18 units
2564  *       FLOW_OFF = 4096 bytes →  8 units
2565  *   - USB 2.0 (High-Speed):
2566  *       FLOW_ON  = 8704 bytes → 17 units
2567  *       FLOW_OFF = 1024 bytes →  2 units
2568  *
2569  * Note: The FCT_FLOW register must be configured before enabling TX pause
2570  *       (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2571  *
2572  * Return: 0 on success or a negative error code on failure.
2573  */
2574 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2575 					 bool tx_pause, bool rx_pause)
2576 {
2577 	/* Use maximum pause time: 65535 quanta (512-bit times) */
2578 	const u32 pause_time_quanta = 65535;
2579 	u32 fct_flow = 0;
2580 	u32 flow = 0;
2581 	int ret;
2582 
2583 	/* Prepare MAC flow control bits */
2584 	if (tx_pause)
2585 		flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2586 
2587 	if (rx_pause)
2588 		flow |= FLOW_CR_RX_FCEN_;
2589 
2590 	/* Select RX FIFO thresholds based on USB speed
2591 	 *
2592 	 * FCT_FLOW layout:
2593 	 *   bits [6:0]   FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2594 	 *   bits [14:8]  FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2595 	 *   thresholds are expressed in units of 512 bytes
2596 	 */
2597 	switch (dev->udev->speed) {
2598 	case USB_SPEED_SUPER:
2599 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2600 		break;
2601 	case USB_SPEED_HIGH:
2602 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2603 		break;
2604 	default:
2605 		netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2606 			    dev->udev->speed);
2607 		return -EINVAL;
2608 	}
2609 
2610 	/* Step 1: Write FIFO thresholds before enabling pause frames */
2611 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2612 	if (ret < 0)
2613 		return ret;
2614 
2615 	/* Step 2: Enable MAC pause functionality */
2616 	return lan78xx_write_reg(dev, FLOW, flow);
2617 }
2618 
2619 /**
2620  * lan78xx_register_fixed_phy() - Register a fallback fixed PHY
2621  * @dev: LAN78xx device
2622  *
2623  * Registers a fixed PHY with 1 Gbps full duplex. This is used in special cases
2624  * like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface to a
2625  * switch without a visible PHY.
2626  *
2627  * Return: pointer to the registered fixed PHY, or ERR_PTR() on error.
2628  */
2629 static struct phy_device *lan78xx_register_fixed_phy(struct lan78xx_net *dev)
2630 {
2631 	static const struct fixed_phy_status fphy_status = {
2632 		.link = 1,
2633 		.speed = SPEED_1000,
2634 		.duplex = DUPLEX_FULL,
2635 	};
2636 
2637 	netdev_info(dev->net,
2638 		    "No PHY found on LAN7801 – registering fixed PHY (e.g. EVB-KSZ9897-1)\n");
2639 
2640 	return fixed_phy_register(&fphy_status, NULL);
2641 }
2642 
2643 /**
2644  * lan78xx_get_phy() - Probe or register PHY device and set interface mode
2645  * @dev: LAN78xx device structure
2646  *
2647  * This function attempts to find a PHY on the MDIO bus. If no PHY is found
2648  * and the chip is LAN7801, it registers a fixed PHY as fallback. It also
2649  * sets dev->interface based on chip ID and detected PHY type.
2650  *
2651  * Return: a valid PHY device pointer, or ERR_PTR() on failure.
2652  */
2653 static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
2654 {
2655 	struct phy_device *phydev;
2656 
2657 	/* Attempt to locate a PHY on the MDIO bus */
2658 	phydev = phy_find_first(dev->mdiobus);
2659 
2660 	switch (dev->chipid) {
2661 	case ID_REV_CHIP_ID_7801_:
2662 		if (phydev) {
2663 			/* External RGMII PHY detected */
2664 			dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2665 			phydev->is_internal = false;
2666 
2667 			if (!phydev->drv)
2668 				netdev_warn(dev->net,
2669 					    "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
2670 
2671 			return phydev;
2672 		}
2673 
2674 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2675 		/* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */
2676 		return lan78xx_register_fixed_phy(dev);
2677 
2678 	case ID_REV_CHIP_ID_7800_:
2679 	case ID_REV_CHIP_ID_7850_:
2680 		if (!phydev)
2681 			return ERR_PTR(-ENODEV);
2682 
2683 		/* These use internal GMII-connected PHY */
2684 		dev->interface = PHY_INTERFACE_MODE_GMII;
2685 		phydev->is_internal = true;
2686 		return phydev;
2687 
2688 	default:
2689 		netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
2690 		return ERR_PTR(-ENODEV);
2691 	}
2692 }
2693 
2694 /**
2695  * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
2696  * @dev: LAN78xx device
2697  *
2698  * Configure MAC-side registers according to dev->interface, which should be
2699  * set by lan78xx_get_phy().
2700  *
2701  * - For PHY_INTERFACE_MODE_RGMII:
2702  *   Enable MAC-side TXC delay. This mode seems to be used in a special setup
2703  *   without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
2704  *   connected to the KSZ9897 switch, and the link timing is expected to be
2705  *   hardwired (e.g. via strapping or board layout). No devicetree support is
2706  *   assumed here.
2707  *
2708  * - For PHY_INTERFACE_MODE_RGMII_ID:
2709  *   Disable MAC-side delay and rely on the PHY driver to provide delay.
2710  *
2711  * - For GMII, no MAC-specific config is needed.
2712  *
2713  * Return: 0 on success or a negative error code.
2714  */
2715 static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
2716 {
2717 	int ret;
2718 
2719 	switch (dev->interface) {
2720 	case PHY_INTERFACE_MODE_RGMII:
2721 		/* Enable MAC-side TX clock delay */
2722 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2723 					MAC_RGMII_ID_TXC_DELAY_EN_);
2724 		if (ret < 0)
2725 			return ret;
2726 
2727 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2728 		if (ret < 0)
2729 			return ret;
2730 
2731 		ret = lan78xx_update_reg(dev, HW_CFG,
2732 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
2733 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
2734 		if (ret < 0)
2735 			return ret;
2736 
2737 		break;
2738 
2739 	case PHY_INTERFACE_MODE_RGMII_ID:
2740 		/* Disable MAC-side TXC delay, PHY provides it */
2741 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2742 		if (ret < 0)
2743 			return ret;
2744 
2745 		break;
2746 
2747 	case PHY_INTERFACE_MODE_GMII:
2748 		/* No MAC-specific configuration required */
2749 		break;
2750 
2751 	default:
2752 		netdev_warn(dev->net, "Unsupported interface mode: %d\n",
2753 			    dev->interface);
2754 		break;
2755 	}
2756 
2757 	return 0;
2758 }
2759 
2760 /**
2761  * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2762  * @dev: LAN78xx device
2763  * @phydev: PHY device (must be valid)
2764  *
2765  * Reads "microchip,led-modes" property from the PHY's DT node and enables
2766  * the corresponding number of LEDs by writing to HW_CFG.
2767  *
2768  * This helper preserves the original logic, enabling up to 4 LEDs.
2769  * If the property is not present, this function does nothing.
2770  *
2771  * Return: 0 on success or a negative error code.
2772  */
2773 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2774 					  struct phy_device *phydev)
2775 {
2776 	struct device_node *np = phydev->mdio.dev.of_node;
2777 	u32 reg;
2778 	int len, ret;
2779 
2780 	if (!np)
2781 		return 0;
2782 
2783 	len = of_property_count_elems_of_size(np, "microchip,led-modes",
2784 					      sizeof(u32));
2785 	if (len < 0)
2786 		return 0;
2787 
2788 	ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2789 	if (ret < 0)
2790 		return ret;
2791 
2792 	reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2793 		 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2794 
2795 	reg |= (len > 0) * HW_CFG_LED0_EN_ |
2796 	       (len > 1) * HW_CFG_LED1_EN_ |
2797 	       (len > 2) * HW_CFG_LED2_EN_ |
2798 	       (len > 3) * HW_CFG_LED3_EN_;
2799 
2800 	return lan78xx_write_reg(dev, HW_CFG, reg);
2801 }
2802 
/* Find (or create) the PHY, attach it to the net device and set up
 * pause advertisement, EEE and LED configuration.  On failure any
 * fixed PHY registered by lan78xx_get_phy() is unregistered again.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	phydev = lan78xx_get_phy(dev);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	ret = lan78xx_mac_prepare_for_phy(dev);
	if (ret < 0)
		goto free_phy;

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* only LAN7801 may have registered a fixed PHY fallback */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
				phy_device_free(phydev);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	/* rebuild the pause advertisement from fc_request_control */
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	phy_support_eee(phydev);

	ret = lan78xx_configure_leds_from_dt(dev, phydev);
	if (ret)
		goto free_phy;

	genphy_config_aneg(phydev);

	/* remember the autoneg setting for later flow-control handling */
	dev->fc_autoneg = phydev->autoneg;

	return 0;

free_phy:
	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	return ret;
}
2876 
2877 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2878 {
2879 	bool rxenabled;
2880 	u32 buf;
2881 	int ret;
2882 
2883 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2884 	if (ret < 0)
2885 		return ret;
2886 
2887 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2888 
2889 	if (rxenabled) {
2890 		buf &= ~MAC_RX_RXEN_;
2891 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2892 		if (ret < 0)
2893 			return ret;
2894 	}
2895 
2896 	/* add 4 to size for FCS */
2897 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2898 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2899 
2900 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2901 	if (ret < 0)
2902 		return ret;
2903 
2904 	if (rxenabled) {
2905 		buf |= MAC_RX_RXEN_;
2906 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2907 		if (ret < 0)
2908 			return ret;
2909 	}
2910 
2911 	return 0;
2912 }
2913 
/* Asynchronously unlink every URB on @q that is not already being
 * unlinked; returns the number of unlinks issued.  The queue lock is
 * dropped around usb_unlink_urb() and the walk restarted afterwards,
 * since the unlink may complete (and requeue work) while we sleep.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry not already marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2958 
2959 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2960 {
2961 	struct lan78xx_net *dev = netdev_priv(netdev);
2962 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2963 	int ret;
2964 
2965 	/* no second zero-length packet read wanted after mtu-sized packets */
2966 	if ((max_frame_len % dev->maxpacket) == 0)
2967 		return -EDOM;
2968 
2969 	ret = usb_autopm_get_interface(dev->intf);
2970 	if (ret < 0)
2971 		return ret;
2972 
2973 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2974 	if (ret < 0)
2975 		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2976 			   new_mtu, netdev->mtu, ERR_PTR(ret));
2977 	else
2978 		WRITE_ONCE(netdev->mtu, new_mtu);
2979 
2980 	usb_autopm_put_interface(dev->intf);
2981 
2982 	return ret;
2983 }
2984 
2985 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2986 {
2987 	struct lan78xx_net *dev = netdev_priv(netdev);
2988 	struct sockaddr *addr = p;
2989 	u32 addr_lo, addr_hi;
2990 	int ret;
2991 
2992 	if (netif_running(netdev))
2993 		return -EBUSY;
2994 
2995 	if (!is_valid_ether_addr(addr->sa_data))
2996 		return -EADDRNOTAVAIL;
2997 
2998 	eth_hw_addr_set(netdev, addr->sa_data);
2999 
3000 	addr_lo = netdev->dev_addr[0] |
3001 		  netdev->dev_addr[1] << 8 |
3002 		  netdev->dev_addr[2] << 16 |
3003 		  netdev->dev_addr[3] << 24;
3004 	addr_hi = netdev->dev_addr[4] |
3005 		  netdev->dev_addr[5] << 8;
3006 
3007 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
3008 	if (ret < 0)
3009 		return ret;
3010 
3011 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
3012 	if (ret < 0)
3013 		return ret;
3014 
3015 	/* Added to support MAC address changes */
3016 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
3017 	if (ret < 0)
3018 		return ret;
3019 
3020 	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
3021 }
3022 
3023 /* Enable or disable Rx checksum offload engine */
3024 static int lan78xx_set_features(struct net_device *netdev,
3025 				netdev_features_t features)
3026 {
3027 	struct lan78xx_net *dev = netdev_priv(netdev);
3028 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3029 	unsigned long flags;
3030 
3031 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3032 
3033 	if (features & NETIF_F_RXCSUM) {
3034 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3035 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3036 	} else {
3037 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3038 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3039 	}
3040 
3041 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
3042 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3043 	else
3044 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3045 
3046 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3047 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3048 	else
3049 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3050 
3051 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3052 
3053 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3054 }
3055 
/* Work item: push the software VLAN filter table to the device.  The
 * VLAN add/kill callbacks only update pdata->vlan_table and schedule
 * this work, because the register write needs a sleepable context.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
3065 
3066 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
3067 				   __be16 proto, u16 vid)
3068 {
3069 	struct lan78xx_net *dev = netdev_priv(netdev);
3070 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3071 	u16 vid_bit_index;
3072 	u16 vid_dword_index;
3073 
3074 	vid_dword_index = (vid >> 5) & 0x7F;
3075 	vid_bit_index = vid & 0x1F;
3076 
3077 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
3078 
3079 	/* defer register writes to a sleepable context */
3080 	schedule_work(&pdata->set_vlan);
3081 
3082 	return 0;
3083 }
3084 
3085 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
3086 				    __be16 proto, u16 vid)
3087 {
3088 	struct lan78xx_net *dev = netdev_priv(netdev);
3089 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3090 	u16 vid_bit_index;
3091 	u16 vid_dword_index;
3092 
3093 	vid_dword_index = (vid >> 5) & 0x7F;
3094 	vid_bit_index = vid & 0x1F;
3095 
3096 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
3097 
3098 	/* defer register writes to a sleepable context */
3099 	schedule_work(&pdata->set_vlan);
3100 
3101 	return 0;
3102 }
3103 
3104 static int lan78xx_init_ltm(struct lan78xx_net *dev)
3105 {
3106 	u32 regs[6] = { 0 };
3107 	int ret;
3108 	u32 buf;
3109 
3110 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
3111 	if (ret < 0)
3112 		goto init_ltm_failed;
3113 
3114 	if (buf & USB_CFG1_LTM_ENABLE_) {
3115 		u8 temp[2];
3116 		/* Get values from EEPROM first */
3117 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
3118 			if (temp[0] == 24) {
3119 				ret = lan78xx_read_raw_eeprom(dev,
3120 							      temp[1] * 2,
3121 							      24,
3122 							      (u8 *)regs);
3123 				if (ret < 0)
3124 					return ret;
3125 			}
3126 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
3127 			if (temp[0] == 24) {
3128 				ret = lan78xx_read_raw_otp(dev,
3129 							   temp[1] * 2,
3130 							   24,
3131 							   (u8 *)regs);
3132 				if (ret < 0)
3133 					return ret;
3134 			}
3135 		}
3136 	}
3137 
3138 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
3139 	if (ret < 0)
3140 		goto init_ltm_failed;
3141 
3142 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
3143 	if (ret < 0)
3144 		goto init_ltm_failed;
3145 
3146 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
3147 	if (ret < 0)
3148 		goto init_ltm_failed;
3149 
3150 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
3151 	if (ret < 0)
3152 		goto init_ltm_failed;
3153 
3154 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
3155 	if (ret < 0)
3156 		goto init_ltm_failed;
3157 
3158 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
3159 	if (ret < 0)
3160 		goto init_ltm_failed;
3161 
3162 	return 0;
3163 
3164 init_ltm_failed:
3165 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
3166 	return ret;
3167 }
3168 
3169 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3170 {
3171 	int result = 0;
3172 
3173 	switch (dev->udev->speed) {
3174 	case USB_SPEED_SUPER:
3175 		dev->rx_urb_size = RX_SS_URB_SIZE;
3176 		dev->tx_urb_size = TX_SS_URB_SIZE;
3177 		dev->n_rx_urbs = RX_SS_URB_NUM;
3178 		dev->n_tx_urbs = TX_SS_URB_NUM;
3179 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
3180 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3181 		break;
3182 	case USB_SPEED_HIGH:
3183 		dev->rx_urb_size = RX_HS_URB_SIZE;
3184 		dev->tx_urb_size = TX_HS_URB_SIZE;
3185 		dev->n_rx_urbs = RX_HS_URB_NUM;
3186 		dev->n_tx_urbs = TX_HS_URB_NUM;
3187 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
3188 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3189 		break;
3190 	case USB_SPEED_FULL:
3191 		dev->rx_urb_size = RX_FS_URB_SIZE;
3192 		dev->tx_urb_size = TX_FS_URB_SIZE;
3193 		dev->n_rx_urbs = RX_FS_URB_NUM;
3194 		dev->n_tx_urbs = TX_FS_URB_NUM;
3195 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
3196 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3197 		break;
3198 	default:
3199 		netdev_warn(dev->net, "USB bus speed not supported\n");
3200 		result = -EIO;
3201 		break;
3202 	}
3203 
3204 	return result;
3205 }
3206 
/* lan78xx_reset - perform a chip "lite" reset and re-initialise the MAC
 *
 * Issues HW_CFG_LRST_ and polls (up to 1 s) for the device to clear it,
 * then restores the MAC address, caches the chip ID/revision, configures
 * USB behaviour (NAK on IN, burst cap, bulk-in delay), FIFO sizes, flow
 * control (disabled here; re-enabled on link up), the receive filter,
 * checksum offload and multicast state, resets the PHY (polling up to
 * 1 s for completion), and programs MAC_CR per chip variant.
 *
 * Returns 0 on success or a negative errno (-ETIMEDOUT if either the
 * lite reset or the PHY reset fails to complete in time).
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* Hardware clears HW_CFG_LRST_ when the lite reset completes */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	ret = lan78xx_init_mac_address(dev);
	if (ret < 0)
		return ret;

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	ret = lan78xx_init_ltm(dev);
	if (ret < 0)
		return ret;

	/* dev->burst_cap / dev->bulk_in_delay were sized earlier from the
	 * USB link speed
	 */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	/* multiple ethernet frames per USB transfer + clock outputs */
	buf |= HW_CFG_MEF_;
	buf |= HW_CFG_CLK125_EN_;
	buf |= HW_CFG_REFCLK25_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	/* enable burst cap */
	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	/* Flow control is left off here; it is configured when the link
	 * comes up and the negotiated speed is known
	 */
	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* wait for both reset-complete (bit self-clears) and device ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_) {
		buf &= ~MAC_CR_GMII_EN_;
		/* Enable Auto Duplex and Auto speed */
		buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	}

	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
	    dev->chipid == ID_REV_CHIP_ID_7850_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3395 
3396 static void lan78xx_init_stats(struct lan78xx_net *dev)
3397 {
3398 	u32 *p;
3399 	int i;
3400 
3401 	/* initialize for stats update
3402 	 * some counters are 20bits and some are 32bits
3403 	 */
3404 	p = (u32 *)&dev->stats.rollover_max;
3405 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3406 		p[i] = 0xFFFFF;
3407 
3408 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3409 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3410 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3411 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3412 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3413 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3414 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3415 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3416 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3417 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3418 
3419 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3420 }
3421 
/* lan78xx_open - ndo_open callback: bring the interface up
 *
 * Takes a runtime-PM reference for the whole time the device is open
 * (released in lan78xx_stop, or below on failure), starts the PHY,
 * submits the interrupt URB used for link-change notification, flushes
 * and starts the Tx/Rx data paths, then enables the stack queue and
 * NAPI.  A link reset event is deferred so link state is evaluated
 * once the device is fully up.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force the link state to be re-evaluated by the deferred work */
	dev->link_on = false;

	napi_enable(&dev->napi);

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	/* on failure drop the PM reference taken above; on success it is
	 * held until lan78xx_stop()
	 */
	if (ret < 0)
		usb_autopm_put_interface(dev->intf);

	return ret;
}
3482 
/* lan78xx_terminate_urbs - cancel all in-flight Tx/Rx URBs and drain queues
 *
 * Registers an on-stack waitqueue in dev->wait so URB completion paths
 * can wake us, unlinks every outstanding Tx and Rx URB, then sleeps in
 * short intervals until both the txq and rxq lists are empty.  Finally
 * releases buffers from the Rx done queue back to the free pool and
 * purges the Rx overflow and Tx pending queues.
 *
 * Caller must ensure no new URBs are submitted concurrently (called
 * from lan78xx_stop with the queue stopped and NAPI disabled).
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		/* re-arm before re-checking the queues to avoid a missed
		 * wakeup between the check and the next sleep
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3518 
/* lan78xx_stop - ndo_stop callback: bring the interface down
 *
 * Stops the statistics timer, the stack queue and NAPI, cancels all
 * in-flight URBs, stops the Tx/Rx data paths and the PHY, kills the
 * interrupt URB, neutralises deferred work, and finally drops the
 * runtime-PM reference taken in lan78xx_open().  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		timer_delete_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	/* balances the reference taken in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3567 
/* defer_bh - move an SKB from an active URB list to the Rx done queue
 *
 * Called from URB completion (interrupt) context.  Records the new
 * buffer state, unlinks the SKB from @list and appends it to
 * dev->rxq_done, scheduling NAPI when the done queue transitions from
 * empty to non-empty (so NAPI is kicked exactly once per burst).
 *
 * Locking: hand-over-hand — interrupts are disabled via @list->lock
 * (irqsave) and stay disabled across the switch to rxq_done.lock; the
 * saved flags are restored when the second lock is dropped.
 *
 * Returns the SKB's previous state.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3591 
3592 static void tx_complete(struct urb *urb)
3593 {
3594 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3595 	struct skb_data *entry = (struct skb_data *)skb->cb;
3596 	struct lan78xx_net *dev = entry->dev;
3597 
3598 	if (urb->status == 0) {
3599 		dev->net->stats.tx_packets += entry->num_of_packet;
3600 		dev->net->stats.tx_bytes += entry->length;
3601 	} else {
3602 		dev->net->stats.tx_errors += entry->num_of_packet;
3603 
3604 		switch (urb->status) {
3605 		case -EPIPE:
3606 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3607 			break;
3608 
3609 		/* software-driven interface shutdown */
3610 		case -ECONNRESET:
3611 		case -ESHUTDOWN:
3612 			netif_dbg(dev, tx_err, dev->net,
3613 				  "tx err interface gone %d\n",
3614 				  entry->urb->status);
3615 			break;
3616 
3617 		case -EPROTO:
3618 		case -ETIME:
3619 		case -EILSEQ:
3620 			netif_stop_queue(dev->net);
3621 			netif_dbg(dev, tx_err, dev->net,
3622 				  "tx err queue stopped %d\n",
3623 				  entry->urb->status);
3624 			break;
3625 		default:
3626 			netif_dbg(dev, tx_err, dev->net,
3627 				  "unknown tx err %d\n",
3628 				  entry->urb->status);
3629 			break;
3630 		}
3631 	}
3632 
3633 	usb_autopm_put_interface_async(dev->intf);
3634 
3635 	skb_unlink(skb, &dev->txq);
3636 
3637 	lan78xx_release_tx_buf(dev, skb);
3638 
3639 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3640 	 */
3641 	if (skb_queue_empty(&dev->txq) &&
3642 	    !skb_queue_empty(&dev->txq_pend))
3643 		napi_schedule(&dev->napi);
3644 }
3645 
3646 static void lan78xx_queue_skb(struct sk_buff_head *list,
3647 			      struct sk_buff *newsk, enum skb_state state)
3648 {
3649 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3650 
3651 	__skb_queue_tail(list, newsk);
3652 	entry->state = state;
3653 }
3654 
3655 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3656 {
3657 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3658 }
3659 
3660 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3661 {
3662 	return dev->tx_pend_data_len;
3663 }
3664 
3665 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3666 				    struct sk_buff *skb,
3667 				    unsigned int *tx_pend_data_len)
3668 {
3669 	unsigned long flags;
3670 
3671 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3672 
3673 	__skb_queue_tail(&dev->txq_pend, skb);
3674 
3675 	dev->tx_pend_data_len += skb->len;
3676 	*tx_pend_data_len = dev->tx_pend_data_len;
3677 
3678 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3679 }
3680 
3681 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3682 					 struct sk_buff *skb,
3683 					 unsigned int *tx_pend_data_len)
3684 {
3685 	unsigned long flags;
3686 
3687 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3688 
3689 	__skb_queue_head(&dev->txq_pend, skb);
3690 
3691 	dev->tx_pend_data_len += skb->len;
3692 	*tx_pend_data_len = dev->tx_pend_data_len;
3693 
3694 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3695 }
3696 
3697 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3698 				    struct sk_buff **skb,
3699 				    unsigned int *tx_pend_data_len)
3700 {
3701 	unsigned long flags;
3702 
3703 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3704 
3705 	*skb = __skb_dequeue(&dev->txq_pend);
3706 	if (*skb)
3707 		dev->tx_pend_data_len -= (*skb)->len;
3708 	*tx_pend_data_len = dev->tx_pend_data_len;
3709 
3710 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3711 }
3712 
/* lan78xx_start_xmit - ndo_start_xmit: queue an SKB for transmission
 *
 * SKBs are never sent directly; they are added to the Tx pending queue
 * and NAPI packs them into URB buffers (lan78xx_tx_bh).  If the device
 * is runtime-suspended the deferred work is kicked to resume it.  The
 * stack queue is stopped when pending data would already fill every
 * free URB; it is restarted from the Tx bottom half / poll loop.
 *
 * Always returns NETDEV_TX_OK (the SKB is always consumed).
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* wake the device if it is autosuspended */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3748 
3749 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3750 {
3751 	struct lan78xx_priv *pdata = NULL;
3752 	int ret;
3753 	int i;
3754 
3755 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3756 
3757 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3758 	if (!pdata) {
3759 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3760 		return -ENOMEM;
3761 	}
3762 
3763 	pdata->dev = dev;
3764 
3765 	spin_lock_init(&pdata->rfe_ctl_lock);
3766 	mutex_init(&pdata->dataport_mutex);
3767 
3768 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3769 
3770 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3771 		pdata->vlan_table[i] = 0;
3772 
3773 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3774 
3775 	dev->net->features = 0;
3776 
3777 	if (DEFAULT_TX_CSUM_ENABLE)
3778 		dev->net->features |= NETIF_F_HW_CSUM;
3779 
3780 	if (DEFAULT_RX_CSUM_ENABLE)
3781 		dev->net->features |= NETIF_F_RXCSUM;
3782 
3783 	if (DEFAULT_TSO_CSUM_ENABLE)
3784 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3785 
3786 	if (DEFAULT_VLAN_RX_OFFLOAD)
3787 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3788 
3789 	if (DEFAULT_VLAN_FILTER_ENABLE)
3790 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3791 
3792 	dev->net->hw_features = dev->net->features;
3793 
3794 	ret = lan78xx_setup_irq_domain(dev);
3795 	if (ret < 0) {
3796 		netdev_warn(dev->net,
3797 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3798 		goto out1;
3799 	}
3800 
3801 	/* Init all registers */
3802 	ret = lan78xx_reset(dev);
3803 	if (ret) {
3804 		netdev_warn(dev->net, "Registers INIT FAILED....");
3805 		goto out2;
3806 	}
3807 
3808 	ret = lan78xx_mdio_init(dev);
3809 	if (ret) {
3810 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3811 		goto out2;
3812 	}
3813 
3814 	dev->net->flags |= IFF_MULTICAST;
3815 
3816 	pdata->wol = WAKE_MAGIC;
3817 
3818 	return ret;
3819 
3820 out2:
3821 	lan78xx_remove_irq_domain(dev);
3822 
3823 out1:
3824 	netdev_warn(dev->net, "Bind routine FAILED");
3825 	cancel_work_sync(&pdata->set_multicast);
3826 	cancel_work_sync(&pdata->set_vlan);
3827 	kfree(pdata);
3828 	return ret;
3829 }
3830 
3831 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3832 {
3833 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3834 
3835 	lan78xx_remove_irq_domain(dev);
3836 
3837 	lan78xx_remove_mdio(dev);
3838 
3839 	if (pdata) {
3840 		cancel_work_sync(&pdata->set_multicast);
3841 		cancel_work_sync(&pdata->set_vlan);
3842 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3843 		kfree(pdata);
3844 		pdata = NULL;
3845 		dev->data[0] = 0;
3846 	}
3847 }
3848 
/* lan78xx_rx_csum_offload - apply the hardware Rx checksum to an SKB
 *
 * Marks the SKB CHECKSUM_COMPLETE with the 16-bit checksum carried in
 * the upper half of @rx_cmd_b, unless RXCSUM is disabled, the hardware
 * flagged a checksum error/ignore (RX_CMD_A_ICSM_), or the frame is
 * VLAN-tagged while tag stripping is off — in those cases fall back to
 * CHECKSUM_NONE and let the stack verify in software.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3866 
3867 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3868 				    struct sk_buff *skb,
3869 				    u32 rx_cmd_a, u32 rx_cmd_b)
3870 {
3871 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3872 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3873 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3874 				       (rx_cmd_b & 0xffff));
3875 }
3876 
/* lan78xx_skb_return - hand a fully-built Rx frame to the network stack
 *
 * Updates Rx statistics, resolves the protocol, and delivers via GRO.
 * If the stack takes the SKB for deferred hardware timestamping it is
 * not delivered here.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	/* account before eth_type_trans(), which pulls the Ethernet
	 * header and therefore shrinks skb->len
	 */
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* scrub stale skb_data state before the stack reuses the cb area */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}
3893 
/* lan78xx_rx - parse a completed Rx URB buffer into individual frames
 *
 * The device packs multiple Ethernet frames into one bulk-in transfer,
 * each preceded by a 10-byte command header (RX_CMD_A/B/C) and padded
 * to 4-byte alignment.  Each valid frame is copied into a fresh NAPI
 * SKB; frames parsed after the NAPI @budget is exhausted go to the
 * overflow queue for the next polling cycle.
 *
 * Returns 1 if the buffer was parsed, 0 on a malformed buffer (caller
 * counts an rx_error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		/* per-frame header: two LE32 command words + one LE16 */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* declared length must fit in the remaining buffer */
		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error; skip the frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			/* strip the trailing frame check sequence */
			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3979 
3980 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3981 			      int budget, int *work_done)
3982 {
3983 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3984 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3985 		dev->net->stats.rx_errors++;
3986 	}
3987 }
3988 
3989 static void rx_complete(struct urb *urb)
3990 {
3991 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3992 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3993 	struct lan78xx_net *dev = entry->dev;
3994 	int urb_status = urb->status;
3995 	enum skb_state state;
3996 
3997 	netif_dbg(dev, rx_status, dev->net,
3998 		  "rx done: status %d", urb->status);
3999 
4000 	skb_put(skb, urb->actual_length);
4001 	state = rx_done;
4002 
4003 	if (urb != entry->urb)
4004 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
4005 
4006 	switch (urb_status) {
4007 	case 0:
4008 		if (skb->len < RX_SKB_MIN_LEN) {
4009 			state = rx_cleanup;
4010 			dev->net->stats.rx_errors++;
4011 			dev->net->stats.rx_length_errors++;
4012 			netif_dbg(dev, rx_err, dev->net,
4013 				  "rx length %d\n", skb->len);
4014 		}
4015 		usb_mark_last_busy(dev->udev);
4016 		break;
4017 	case -EPIPE:
4018 		dev->net->stats.rx_errors++;
4019 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
4020 		fallthrough;
4021 	case -ECONNRESET:				/* async unlink */
4022 	case -ESHUTDOWN:				/* hardware gone */
4023 		netif_dbg(dev, ifdown, dev->net,
4024 			  "rx shutdown, code %d\n", urb_status);
4025 		state = rx_cleanup;
4026 		break;
4027 	case -EPROTO:
4028 	case -ETIME:
4029 	case -EILSEQ:
4030 		dev->net->stats.rx_errors++;
4031 		state = rx_cleanup;
4032 		break;
4033 
4034 	/* data overrun ... flush fifo? */
4035 	case -EOVERFLOW:
4036 		dev->net->stats.rx_over_errors++;
4037 		fallthrough;
4038 
4039 	default:
4040 		state = rx_cleanup;
4041 		dev->net->stats.rx_errors++;
4042 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
4043 		break;
4044 	}
4045 
4046 	state = defer_bh(dev, skb, &dev->rxq, state);
4047 }
4048 
/* rx_submit - submit one Rx buffer as a bulk-in URB
 *
 * Fills the buffer's pre-allocated URB and submits it while holding
 * rxq.lock, so the queue and in-flight URB set stay consistent with
 * the state checks (device present, running, not halted/asleep).
 * On success the SKB is queued on dev->rxq in state rx_start; on any
 * failure the buffer is returned to the free pool.
 *
 * Returns 0 on success or a negative errno (-ENOLINK when submission
 * is not currently possible and NAPI should retry).
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* on any failure the buffer goes back to the free pool */
	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
4100 
4101 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4102 {
4103 	struct sk_buff *rx_buf;
4104 
4105 	/* Ensure the maximum number of Rx URBs is submitted
4106 	 */
4107 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4108 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4109 			break;
4110 	}
4111 }
4112 
4113 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
4114 				    struct sk_buff *rx_buf)
4115 {
4116 	/* reset SKB data pointers */
4117 
4118 	rx_buf->data = rx_buf->head;
4119 	skb_reset_tail_pointer(rx_buf);
4120 	rx_buf->len = 0;
4121 	rx_buf->data_len = 0;
4122 
4123 	rx_submit(dev, rx_buf, GFP_ATOMIC);
4124 }
4125 
4126 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
4127 {
4128 	u32 tx_cmd_a;
4129 	u32 tx_cmd_b;
4130 
4131 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
4132 
4133 	if (skb->ip_summed == CHECKSUM_PARTIAL)
4134 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
4135 
4136 	tx_cmd_b = 0;
4137 	if (skb_is_gso(skb)) {
4138 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
4139 
4140 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
4141 
4142 		tx_cmd_a |= TX_CMD_A_LSO_;
4143 	}
4144 
4145 	if (skb_vlan_tag_present(skb)) {
4146 		tx_cmd_a |= TX_CMD_A_IVTG_;
4147 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
4148 	}
4149 
4150 	put_unaligned_le32(tx_cmd_a, buffer);
4151 	put_unaligned_le32(tx_cmd_b, buffer + 4);
4152 }
4153 
/* lan78xx_tx_buf_fill - pack pending SKBs into one Tx URB buffer
 *
 * Dequeues SKBs from the Tx pending queue and copies each — preceded
 * by its 8-byte command header and 4-byte aligned — into @tx_buf until
 * the buffer is full or the queue empties.  An SKB that does not fit
 * is pushed back to the head of the pending queue.  Consumed SKBs are
 * freed; their packet count (GSO segments) and byte length are
 * accumulated in the buffer's skb_data for completion accounting.
 *
 * Returns the skb_data control block of @tx_buf.
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* pad so each frame's command words start 4-byte aligned */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* doesn't fit: put it back for the next URB */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			/* copy failed: drop the frame and back out the
			 * command words already written for it
			 */
			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
4219 
/* lan78xx_tx_bh - Tx bottom half, run from NAPI poll
 *
 * Wakes the stack queue if enough URB space has become free, then
 * repeatedly packs pending SKBs into free URB buffers and submits
 * them, holding an async runtime-PM reference per in-flight URB.
 * Stops when the pending queue or the free-URB pool is exhausted, or
 * when a submission fails.  If the device is autosuspended, submission
 * is deferred by anchoring the URB for resume-time processing.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		/* txq.lock serialises submission against tx_complete() */
		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			/* NB: jumps into the if (ret) block below to drop
			 * the filled buffer and account the packets
			 */
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
4312 
/* lan78xx_bh - main NAPI bottom half: process Rx completions, run Tx
 *
 * Delivers any frames left over from the previous cycle, then
 * snapshots the Rx done queue (so completions can keep appending),
 * parses each completed URB within @budget and resubmits it.  URBs
 * not processed before the budget ran out are put back on the front
 * of the done queue.  Finally resets the statistics timer delta,
 * refills the Rx URB pool and runs the Tx bottom half.
 *
 * Returns the number of frames delivered (NAPI work done).
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* errored URB: no frames to deliver, just resubmit */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4388 
/* NAPI poll handler. Does nothing while the device is asleep;
 * otherwise runs the bottom half and, if the budget was not fully
 * consumed, completes NAPI and reschedules itself when more Rx
 * completions are already queued or Tx work is pending.
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				/* Wake a stopped Tx queue under the tx
				 * lock and poll again so newly queued
				 * skbs are submitted.
				 */
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	/* Returning the full budget (no napi_complete_done() call in
	 * that case) keeps this handler scheduled for another pass.
	 */
	return result;
}
4432 
4433 static void lan78xx_delayedwork(struct work_struct *work)
4434 {
4435 	int status;
4436 	struct lan78xx_net *dev;
4437 
4438 	dev = container_of(work, struct lan78xx_net, wq.work);
4439 
4440 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4441 		return;
4442 
4443 	if (usb_autopm_get_interface(dev->intf) < 0)
4444 		return;
4445 
4446 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4447 		unlink_urbs(dev, &dev->txq);
4448 
4449 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4450 		if (status < 0 &&
4451 		    status != -EPIPE &&
4452 		    status != -ESHUTDOWN) {
4453 			if (netif_msg_tx_err(dev))
4454 				netdev_err(dev->net,
4455 					   "can't clear tx halt, status %d\n",
4456 					   status);
4457 		} else {
4458 			clear_bit(EVENT_TX_HALT, &dev->flags);
4459 			if (status != -ESHUTDOWN)
4460 				netif_wake_queue(dev->net);
4461 		}
4462 	}
4463 
4464 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4465 		unlink_urbs(dev, &dev->rxq);
4466 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4467 		if (status < 0 &&
4468 		    status != -EPIPE &&
4469 		    status != -ESHUTDOWN) {
4470 			if (netif_msg_rx_err(dev))
4471 				netdev_err(dev->net,
4472 					   "can't clear rx halt, status %d\n",
4473 					   status);
4474 		} else {
4475 			clear_bit(EVENT_RX_HALT, &dev->flags);
4476 			napi_schedule(&dev->napi);
4477 		}
4478 	}
4479 
4480 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4481 		int ret = 0;
4482 
4483 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4484 		if (lan78xx_link_reset(dev) < 0) {
4485 			netdev_info(dev->net, "link reset failed (%d)\n",
4486 				    ret);
4487 		}
4488 	}
4489 
4490 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4491 		lan78xx_update_stats(dev);
4492 
4493 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4494 
4495 		mod_timer(&dev->stat_monitor,
4496 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4497 
4498 		dev->delta = min((dev->delta * 2), 50);
4499 	}
4500 
4501 	usb_autopm_put_interface(dev->intf);
4502 }
4503 
/* Completion handler for the interrupt (status) endpoint URB.
 * Processes the status report on success, then resubmits the URB
 * unless the interface is shutting down or the device is gone.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* Zero the buffer so stale status bits can never be re-read,
	 * then resubmit. Completion context requires GFP_ATOMIC.
	 */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		/* device vanished between the presence check above and
		 * the resubmit - detach so the stack stops using it
		 */
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4555 
/* USB disconnect handler: tears everything down roughly in reverse
 * order of probe - netdev unregistration, timer/work cancellation,
 * PHY disconnect, URB and buffer release.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	netif_napi_del(&dev->napi);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	/* Flag disconnect first so lan78xx_delayedwork() bails out
	 * early, then wait for any running work to finish.
	 */
	timer_shutdown_sync(&dev->stat_monitor);
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	/* Save the PHY pointer before phy_disconnect(); the saved copy
	 * is still needed below for the fixed-link cleanup.
	 */
	phydev = net->phydev;

	phy_disconnect(net->phydev);

	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4601 
4602 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4603 {
4604 	struct lan78xx_net *dev = netdev_priv(net);
4605 
4606 	unlink_urbs(dev, &dev->txq);
4607 	napi_schedule(&dev->napi);
4608 }
4609 
4610 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4611 						struct net_device *netdev,
4612 						netdev_features_t features)
4613 {
4614 	struct lan78xx_net *dev = netdev_priv(netdev);
4615 
4616 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4617 		features &= ~NETIF_F_GSO_MASK;
4618 
4619 	features = vlan_features_check(skb, features);
4620 	features = vxlan_features_check(skb, features);
4621 
4622 	return features;
4623 }
4624 
/* net_device operations table; ioctls are forwarded straight to the
 * PHY via phy_do_ioctl_running().
 */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4640 
/* Statistics timer callback: flags EVENT_STAT_UPDATE so the deferred
 * work handler (lan78xx_delayedwork) performs the actual update in
 * process context.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4647 
/* USB probe: allocate the netdev, initialise queues and URB pools,
 * validate/configure the three endpoints (bulk-in, bulk-out, interrupt
 * status), bind to the chip, set up the PHY and register the network
 * device. Error paths unwind in reverse order through the goto ladder
 * at the bottom.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	/* hold a reference on the USB device for the netdev's lifetime;
	 * dropped in lan78xx_disconnect() / the error path below
	 */
	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->mdiobus_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* the driver needs bulk-in, bulk-out and interrupt endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out5;
	}

	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_urbs;
	}

	/* URB_FREE_BUFFER: buf is owned by the URB and freed with it */
	usb_fill_int_urb(dev->urb_intr, dev->udev,
			 dev->pipe_intr, buf, maxp,
			 intr_complete, dev, period);
	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto free_urbs;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto free_urbs;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out8;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): ret is not checked here - a wakeup-enable
	 * failure is treated as non-fatal; confirm this is intentional.
	 */
	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out8:
	phy_disconnect(netdev->phydev);
free_urbs:
	usb_free_urb(dev->urb_intr);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4822 
/* Compute the 16-bit wakeup-frame filter CRC over @buf for the WUF_CFG
 * registers: polynomial 0x8005, seed 0xFFFF, each byte consumed LSB
 * first. Note the "| 0x0001" applied after every polynomial XOR - this
 * matches what the device's filter hardware expects, so it is not a
 * textbook CRC-16.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	u16 crc = 0xFFFF;
	int idx;

	for (idx = 0; idx < len; idx++) {
		u8 byte = buf[idx];
		int shift;

		for (shift = 0; shift < 8; shift++) {
			u16 feedback = (crc >> 15) ^ (u16)(byte & 1);

			crc <<= 1;
			if (feedback)
				crc = (crc ^ 0x8005) | (u16)0x0001U;
			byte >>= 1;
		}
	}

	return crc;
}
4847 
/* Arm the chip for USB selective (auto)suspend: stop the datapath,
 * reset wake state, enable good-frame/store wake in WUCSR and PHY/WOL
 * wake in PMT_CTL (suspend mode 3), clear WUPS, then restart the Rx
 * path so wake events can still be detected. Returns 0 or a negative
 * error from the failing register access.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* clear WUPS (same read-modify-write sequence as in
	 * lan78xx_set_suspend())
	 */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4915 
/* Configure wake-on-LAN for system suspend according to @wol (WAKE_*
 * flags): stop the datapath, program the wakeup-frame filters and the
 * WUCSR/PMT_CTL wake bits for each requested wake source, clear the
 * wakeup status, then restart the Rx path so wake frames can be
 * received. Returns 0 or a negative error from the failing access.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* match patterns for the wakeup-frame filter CRCs below */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* reset wake control and latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters before selectively
	 * re-enabling the ones needed below
	 */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 7 = match the first three bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 3 = match the first two bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3000 = match bytes 12 and 13 (the EtherType) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}
5112 
/* USB suspend handler (system sleep and runtime autosuspend).
 * With the interface open: refuse autosuspend while Tx is busy,
 * quiesce the datapath, then arm either selective-suspend wake
 * (autosuspend) or the configured WOL (system sleep). With the
 * interface closed: disable all wake sources and enter suspend mode 3.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* EVENT_DEV_ASLEEP stops lan78xx_poll() from
			 * doing any work while suspended
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		timer_delete(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		/* clear WUPS */
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5222 
/* Resubmit Tx URBs that were anchored on dev->deferred while the
 * device was suspended. Called from lan78xx_resume() with
 * dev->txq.lock held. URBs that cannot be submitted have their Tx
 * buffers released. Returns true if the Tx pipe stalled (-EPIPE);
 * once stalled, all remaining deferred URBs are dropped too.
 */
static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
{
	bool pipe_halted = false;
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		struct sk_buff *skb = urb->context;
		int ret;

		/* drop the URB if the device went away, the link is
		 * down, or an earlier submit already stalled the pipe
		 */
		if (!netif_device_present(dev->net) ||
		    !netif_carrier_ok(dev->net) ||
		    pipe_halted) {
			lan78xx_release_tx_buf(dev, skb);
			continue;
		}

		ret = usb_submit_urb(urb, GFP_ATOMIC);

		if (ret == 0) {
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, skb, tx_start);
		} else {
			if (ret == -EPIPE) {
				netif_stop_queue(dev->net);
				pipe_halted = true;
			} else if (ret == -ENODEV) {
				netif_device_detach(dev->net);
			}

			lan78xx_release_tx_buf(dev, skb);
		}
	}

	return pipe_halted;
}
5258 
/* USB resume handler, counterpart of lan78xx_suspend(): restart the
 * interrupt URB and Tx path, resubmit Tx URBs deferred during suspend,
 * then clear and re-arm the wake-source status registers.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			/* The inner ret intentionally shadows the outer
			 * one: a failed interrupt-URB submit is only
			 * logged (detaching on -ENODEV) and does not
			 * fail the resume.
			 */
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		/* cleared under txq.lock so it is ordered against the
		 * deferred-URB submission above
		 */
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		napi_schedule(&dev->napi);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	/* reset wake control and latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5355 
5356 static int lan78xx_reset_resume(struct usb_interface *intf)
5357 {
5358 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5359 	int ret;
5360 
5361 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5362 
5363 	ret = lan78xx_reset(dev);
5364 	if (ret < 0)
5365 		return ret;
5366 
5367 	phy_start(dev->net->phydev);
5368 
5369 	ret = lan78xx_resume(intf);
5370 
5371 	return ret;
5372 }
5373 
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
5394 
/* USB driver glue; supports runtime autosuspend and disables
 * hub-initiated link power management.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
5406 
/* generates module init/exit that register/deregister the driver */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
5412