// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phylink.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

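/* Flow control thresholds are programmed in 512-byte units, rounded up,
 * with each field 7 bits wide: the "on" level lands in bits 0..6 and the
 * "off" level in bits 8..14 of the combined value.
 */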
#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_ALIGNMENT			(4)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID		(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

#define TX_URB_NUM			10
#define TX_SS_URB_NUM			TX_URB_NUM
#define TX_HS_URB_NUM			TX_URB_NUM
#define TX_FS_URB_NUM			TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet */
#define TX_SS_URB_SIZE			(32 * 1024)
#define TX_HS_URB_SIZE			(16 * 1024)
#define TX_FS_URB_SIZE			(10 * 1024)

#define RX_SS_URB_NUM			30
#define RX_HS_URB_NUM			10
#define RX_FS_URB_NUM			10
#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
#define RX_FS_URB_SIZE			TX_FS_URB_SIZE

#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY		0x2000
#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY		0x2000
#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY		0x2000

#define TX_CMD_LEN			8
#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)

#define RX_CMD_LEN			10
#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2
/* default autosuspend delay (ms) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistics update interval (ms) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_PHY_INT_ACK		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		mdiobus_mutex; /* for MDIO bus access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;

	wait_queue_head_t	*wait;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;

	struct phylink		*phylink;
	struct phylink_config	phylink_config;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
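/* e.g. "ethtool -s <iface> msglvl <bitmask>" adjusts it at runtime */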
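/* Pre-allocated URB/skb buffer pools: Rx and Tx buffers (with their URBs
 * stashed in skb->cb) are recycled through the rxq_free/txq_free queues
 * instead of being allocated per transfer.
 */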
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	if (skb_queue_empty(buf_pool))
		return NULL;

	return skb_dequeue(buf_pool);
}

static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}

static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
{
	struct skb_data *entry;
	struct sk_buff *buf;

	while (!skb_queue_empty(buf_pool)) {
		buf = skb_dequeue(buf_pool);
		if (buf) {
			entry = (struct skb_data *)buf->cb;
			usb_free_urb(entry->urb);
			dev_kfree_skb_any(buf);
		}
	}
}

static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}

static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}

static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}

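/* Device registers are accessed over USB vendor control transfers. The
 * 32-bit value travels through a kmalloc'ed bounce buffer, since
 * usb_control_msg() requires DMA-able memory, and is byte-swapped to/from
 * the device's little-endian order.
 */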
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

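/* Read-modify-write helper: clears the bits in @mask and sets those bits
 * from @data.
 */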
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	return lan78xx_write_reg(dev, reg, buf);
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stats. ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

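/* The hardware counters are 32 bits wide and wrap silently: a new reading
 * smaller than the saved one means the counter rolled over since the last
 * poll, so count the wrap.
 */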
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

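	/* Widen each 32-bit counter to 64 bits: the current hardware value
	 * plus one full counter span (max + 1) per recorded rollover.
	 */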
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}

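/* Clear the enable bit(s) in @reg and poll until the hardware reports the
 * corresponding disabled state, giving up after HW_DISABLE_TIMEOUT.
 */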
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	return stopped ? 0 : -ETIMEDOUT;
}

static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}

static int lan78xx_start_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start tx path");

	/* Start the MAC transmitter */

	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
	if (ret < 0)
		return ret;

	/* Start the Tx FIFO */

	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop tx path");

	/* Stop the Tx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
	if (ret < 0)
		return ret;

	/* Stop the MAC transmitter */

	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}

static int lan78xx_start_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start rx path");

	/* Start the Rx FIFO */

	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
	if (ret < 0)
		return ret;

	/* Start the MAC receiver */
914 
915 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
916 	if (ret < 0)
917 		return ret;
918 
919 	return 0;
920 }
921 
922 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
923 {
924 	int ret;
925 
926 	netif_dbg(dev, drv, dev->net, "stop rx path");
927 
928 	/* Stop the MAC receiver */
929 
930 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
931 	if (ret < 0)
932 		return ret;
933 
934 	/* Stop the Rx FIFO */
935 
936 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
937 	if (ret < 0)
938 		return ret;
939 
940 	return 0;
941 }
942 
943 /* The caller must ensure the Rx path is stopped before calling
944  * lan78xx_flush_rx_fifo().
945  */
946 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
947 {
948 	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
949 }
950 
/* Loop until the MII access completes, with timeout.
 * Called with mdiobus_mutex held.
 */
static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (ret < 0)
			return ret;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -ETIMEDOUT;
}

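/* Compose a MII_ACC register value: PHY address, register index,
 * read/write direction and the BUSY bit that starts the transaction.
 */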
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -ETIMEDOUT;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Not a USB-specific error; try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	int ret;
	u8 sig;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if (ret < 0)
		return ret;

	if (sig != EEPROM_INDICATOR)
		return -ENODATA;

	return lan78xx_read_raw_eeprom(dev, offset, length, data);
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Not a USB-specific error; try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Not a USB-specific error; try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Not a USB-specific error; try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

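/* OTP is read indirectly: power the OTP block up if needed, program the
 * byte address into OTP_ADDR1/OTP_ADDR2, trigger a read via OTP_FUNC_CMD
 * and OTP_CMD_GO, then poll OTP_STATUS before fetching the byte from
 * OTP_RD_DATA.
 */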
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return ret;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -ETIMEDOUT;
}

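/* Write @length words into the selected internal RAM via the indirect
 * dataport: select the RAM, then for each word program DP_ADDR and DP_DATA
 * and issue a DP_CMD write, waiting for DP_SEL_DPRDY_ between operations.
 */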
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

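/* Cache a perfect-filter entry: MAF_LO holds the first four bytes of the
 * MAC address and MAF_HI the last two, plus the valid and destination-type
 * flags. The table is flushed to hardware by the deferred multicast write.
 */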
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i, ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
				     DP_SEL_VHF_VLAN_LEN,
				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
	if (ret < 0)
		goto multicast_write_done;

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
		if (ret < 0)
			goto multicast_write_done;
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

multicast_write_done:
	if (ret < 0)
		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds our own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);

static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}

/**
 * lan78xx_phy_int_ack - Acknowledge PHY interrupt
 * @dev: pointer to the LAN78xx device structure
 *
 * This function acknowledges the PHY interrupt by setting the
 * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
{
	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq_safe(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_get_eee(dev->phylink, edata);
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_eee(dev->phylink, edata);
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_get(dev->phylink, cmd);
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_set(dev->phylink, cmd);
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	phylink_ethtool_get_pauseparam(dev->phylink, pause);
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_pauseparam(dev->phylink, pause);
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	unsigned int data_count = 0;
	u32 *data = buf;
	int i, ret;

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
		if (ret < 0) {
			netdev_warn(dev->net,
				    "failed to read register 0x%08x\n",
				    lan78xx_regs[i]);
			goto clean_data;
		}

		data_count++;
	}

	return;

clean_data:
	memset(data, 0, data_count * sizeof(u32));
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

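/* Pick the MAC address in order of preference: whatever is already in
 * RX_ADDRL/RX_ADDRH, then platform/device-tree data, then EEPROM or OTP,
 * and finally a random address, and program it into both the RX address
 * registers and perfect-filter slot 0.
 */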
1912 static int lan78xx_init_mac_address(struct lan78xx_net *dev)
1913 {
1914 	u32 addr_lo, addr_hi;
1915 	u8 addr[6];
1916 	int ret;
1917 
1918 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1919 	if (ret < 0)
1920 		return ret;
1921 
1922 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1923 	if (ret < 0)
1924 		return ret;
1925 
1926 	addr[0] = addr_lo & 0xFF;
1927 	addr[1] = (addr_lo >> 8) & 0xFF;
1928 	addr[2] = (addr_lo >> 16) & 0xFF;
1929 	addr[3] = (addr_lo >> 24) & 0xFF;
1930 	addr[4] = addr_hi & 0xFF;
1931 	addr[5] = (addr_hi >> 8) & 0xFF;
1932 
1933 	if (!is_valid_ether_addr(addr)) {
1934 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1935 			/* valid address present in Device Tree */
1936 			netif_dbg(dev, ifup, dev->net,
1937 				  "MAC address read from Device Tree");
1938 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1939 						 ETH_ALEN, addr) == 0) ||
1940 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1941 					      ETH_ALEN, addr) == 0)) &&
1942 			   is_valid_ether_addr(addr)) {
1943 			/* eeprom values are valid so use them */
1944 			netif_dbg(dev, ifup, dev->net,
1945 				  "MAC address read from EEPROM");
1946 		} else {
1947 			/* generate random MAC */
1948 			eth_random_addr(addr);
1949 			netif_dbg(dev, ifup, dev->net,
1950 				  "MAC address set to random addr");
1951 		}
1952 
1953 		addr_lo = addr[0] | (addr[1] << 8) |
1954 			  (addr[2] << 16) | (addr[3] << 24);
1955 		addr_hi = addr[4] | (addr[5] << 8);
1956 
1957 		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1958 		if (ret < 0)
1959 			return ret;
1960 
1961 		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1962 		if (ret < 0)
1963 			return ret;
1964 	}
1965 
1966 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1967 	if (ret < 0)
1968 		return ret;
1969 
1970 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1971 	if (ret < 0)
1972 		return ret;
1973 
1974 	eth_hw_addr_set(dev->net, addr);
1975 
1976 	return 0;
1977 }
1978 
1979 /* MDIO read and write wrappers for phylib */
1980 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1981 {
1982 	struct lan78xx_net *dev = bus->priv;
1983 	u32 val, addr;
1984 	int ret;
1985 
1986 	ret = usb_autopm_get_interface(dev->intf);
1987 	if (ret < 0)
1988 		return ret;
1989 
1990 	mutex_lock(&dev->mdiobus_mutex);
1991 
1992 	/* confirm MII not busy */
1993 	ret = lan78xx_mdiobus_wait_not_busy(dev);
1994 	if (ret < 0)
1995 		goto done;
1996 
1997 	/* set the address, index & direction (read from PHY) */
1998 	addr = mii_access(phy_id, idx, MII_READ);
1999 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2000 	if (ret < 0)
2001 		goto done;
2002 
2003 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2004 	if (ret < 0)
2005 		goto done;
2006 
2007 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2008 	if (ret < 0)
2009 		goto done;
2010 
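	/* mii_bus read callbacks return the 16-bit register value on success
	 * or a negative errno.
	 */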
2011 	ret = (int)(val & 0xFFFF);
2012 
2013 done:
2014 	mutex_unlock(&dev->mdiobus_mutex);
2015 	usb_autopm_put_interface(dev->intf);
2016 
2017 	return ret;
2018 }
2019 
2020 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2021 				 u16 regval)
2022 {
2023 	struct lan78xx_net *dev = bus->priv;
2024 	u32 val, addr;
2025 	int ret;
2026 
2027 	ret = usb_autopm_get_interface(dev->intf);
2028 	if (ret < 0)
2029 		return ret;
2030 
2031 	mutex_lock(&dev->mdiobus_mutex);
2032 
2033 	/* confirm MII not busy */
2034 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2035 	if (ret < 0)
2036 		goto done;
2037 
2038 	val = (u32)regval;
2039 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2040 	if (ret < 0)
2041 		goto done;
2042 
2043 	/* set the address, index & direction (write to PHY) */
2044 	addr = mii_access(phy_id, idx, MII_WRITE);
2045 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2046 	if (ret < 0)
2047 		goto done;
2048 
2049 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2050 	if (ret < 0)
2051 		goto done;
2052 
2053 done:
2054 	mutex_unlock(&dev->mdiobus_mutex);
2055 	usb_autopm_put_interface(dev->intf);
2056 	return ret;
2057 }
2058 
2059 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2060 {
2061 	struct device_node *node;
2062 	int ret;
2063 
2064 	dev->mdiobus = mdiobus_alloc();
2065 	if (!dev->mdiobus) {
2066 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2067 		return -ENOMEM;
2068 	}
2069 
2070 	dev->mdiobus->priv = (void *)dev;
2071 	dev->mdiobus->read = lan78xx_mdiobus_read;
2072 	dev->mdiobus->write = lan78xx_mdiobus_write;
2073 	dev->mdiobus->name = "lan78xx-mdiobus";
2074 	dev->mdiobus->parent = &dev->udev->dev;
2075 
2076 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2077 		 dev->udev->bus->busnum, dev->udev->devnum);
2078 
2079 	switch (dev->chipid) {
2080 	case ID_REV_CHIP_ID_7800_:
2081 	case ID_REV_CHIP_ID_7850_:
2082 		/* set to internal PHY id */
2083 		dev->mdiobus->phy_mask = ~(1 << 1);
2084 		break;
2085 	case ID_REV_CHIP_ID_7801_:
2086 		/* scan through PHYAD[2..0] */
2087 		dev->mdiobus->phy_mask = ~(0xFF);
2088 		break;
2089 	}
2090 
2091 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2092 	ret = of_mdiobus_register(dev->mdiobus, node);
2093 	of_node_put(node);
2094 	if (ret) {
2095 		netdev_err(dev->net, "can't register MDIO bus\n");
2096 		goto exit1;
2097 	}
2098 
2099 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2100 	return 0;
2101 exit1:
2102 	mdiobus_free(dev->mdiobus);
2103 	return ret;
2104 }
2105 
2106 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2107 {
2108 	mdiobus_unregister(dev->mdiobus);
2109 	mdiobus_free(dev->mdiobus);
2110 }
2111 
2112 static int irq_map(struct irq_domain *d, unsigned int irq,
2113 		   irq_hw_number_t hwirq)
2114 {
2115 	struct irq_domain_data *data = d->host_data;
2116 
2117 	irq_set_chip_data(irq, data);
2118 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2119 	irq_set_noprobe(irq);
2120 
2121 	return 0;
2122 }
2123 
2124 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2125 {
2126 	irq_set_chip_and_handler(irq, NULL, NULL);
2127 	irq_set_chip_data(irq, NULL);
2128 }
2129 
2130 static const struct irq_domain_ops chip_domain_ops = {
2131 	.map	= irq_map,
2132 	.unmap	= irq_unmap,
2133 };
2134 
2135 static void lan78xx_irq_mask(struct irq_data *irqd)
2136 {
2137 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2138 
2139 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2140 }
2141 
2142 static void lan78xx_irq_unmask(struct irq_data *irqd)
2143 {
2144 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2145 
2146 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2147 }
2148 
2149 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2150 {
2151 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2152 
2153 	mutex_lock(&data->irq_lock);
2154 }
2155 
2156 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2157 {
2158 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2159 	struct lan78xx_net *dev =
2160 			container_of(data, struct lan78xx_net, domain_data);
2161 	u32 buf;
2162 	int ret;
2163 
2164 	/* Do the register access here: irq_bus_lock and irq_bus_sync_unlock
2165 	 * are the only two callbacks executed in a non-atomic context.
2166 	 */
2167 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2168 	if (ret < 0)
2169 		goto irq_bus_sync_unlock;
2170 
2171 	if (buf != data->irqenable)
2172 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2173 
2174 irq_bus_sync_unlock:
2175 	if (ret < 0)
2176 		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
2177 			   ERR_PTR(ret));
2178 
2179 	mutex_unlock(&data->irq_lock);
2180 }
2181 
2182 static struct irq_chip lan78xx_irqchip = {
2183 	.name			= "lan78xx-irqs",
2184 	.irq_mask		= lan78xx_irq_mask,
2185 	.irq_unmask		= lan78xx_irq_unmask,
2186 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2187 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2188 };
2189 
2190 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2191 {
2192 	struct irq_domain *irqdomain;
2193 	unsigned int irqmap = 0;
2194 	u32 buf;
2195 	int ret = 0;
2196 
2197 	mutex_init(&dev->domain_data.irq_lock);
2198 
2199 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2200 	if (ret < 0)
2201 		return ret;
2202 
2203 	dev->domain_data.irqenable = buf;
2204 
2205 	dev->domain_data.irqchip = &lan78xx_irqchip;
2206 	dev->domain_data.irq_handler = handle_simple_irq;
2207 
2208 	irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
2209 					     &chip_domain_ops, &dev->domain_data);
2210 	if (irqdomain) {
2211 		/* create mapping for PHY interrupt */
2212 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2213 		if (!irqmap) {
2214 			irq_domain_remove(irqdomain);
2215 
2216 			irqdomain = NULL;
2217 			ret = -EINVAL;
2218 		}
2219 	} else {
2220 		ret = -EINVAL;
2221 	}
2222 
2223 	dev->domain_data.irqdomain = irqdomain;
2224 	dev->domain_data.phyirq = irqmap;
2225 
2226 	return ret;
2227 }
2228 
2229 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2230 {
2231 	if (dev->domain_data.phyirq > 0) {
2232 		irq_dispose_mapping(dev->domain_data.phyirq);
2233 
2234 		if (dev->domain_data.irqdomain)
2235 			irq_domain_remove(dev->domain_data.irqdomain);
2236 	}
2237 	dev->domain_data.phyirq = 0;
2238 	dev->domain_data.irqdomain = NULL;
2239 }
2240 
2241 static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode,
2242 			       const struct phylink_link_state *state)
2243 {
2244 	struct net_device *net = to_net_dev(config->dev);
2245 	struct lan78xx_net *dev = netdev_priv(net);
2246 	u32 mac_cr = 0;
2247 	int ret;
2248 
2249 	/* Check if the mode is supported */
2250 	if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) {
2251 		netdev_err(net, "Unsupported negotiation mode: %u\n", mode);
2252 		return;
2253 	}
2254 
2255 	switch (state->interface) {
2256 	case PHY_INTERFACE_MODE_GMII:
2257 		mac_cr |= MAC_CR_GMII_EN_;
2258 		break;
2259 	case PHY_INTERFACE_MODE_RGMII:
2260 	case PHY_INTERFACE_MODE_RGMII_ID:
2261 	case PHY_INTERFACE_MODE_RGMII_TXID:
2262 	case PHY_INTERFACE_MODE_RGMII_RXID:
2263 		break;
2264 	default:
2265 		netdev_warn(net, "Unsupported interface mode: %d\n",
2266 			    state->interface);
2267 		return;
2268 	}
2269 
2270 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr);
2271 	if (ret < 0)
2272 		netdev_err(net, "Failed to config MAC with error %pe\n",
2273 			   ERR_PTR(ret));
2274 }
2275 
2276 static void lan78xx_mac_link_down(struct phylink_config *config,
2277 				  unsigned int mode, phy_interface_t interface)
2278 {
2279 	struct net_device *net = to_net_dev(config->dev);
2280 	struct lan78xx_net *dev = netdev_priv(net);
2281 	int ret;
2282 
2283 	netif_stop_queue(net);
2284 
2285 	/* MAC reset will not de-assert TXEN/RXEN, we need to stop them
2286 	 * manually before reset. TX and RX should be disabled before running
2287 	 * link_up sequence.
2288 	 */
2289 	ret = lan78xx_stop_tx_path(dev);
2290 	if (ret < 0)
2291 		goto link_down_fail;
2292 
2293 	ret = lan78xx_stop_rx_path(dev);
2294 	if (ret < 0)
2295 		goto link_down_fail;
2296 
2297 	/* MAC reset does not appear to affect the MAC configuration. It is
2298 	 * unclear whether it is strictly required, but the previous driver
2299 	 * version performed it, so keep it here.
2300 	 */
2301 	ret = lan78xx_mac_reset(dev);
2302 	if (ret < 0)
2303 		goto link_down_fail;
2304 
2305 	return;
2306 
2307 link_down_fail:
2308 	netdev_err(dev->net, "Failed to set MAC down with error %pe\n",
2309 		   ERR_PTR(ret));
2310 }
2311 
2312 /**
2313  * lan78xx_configure_usb - Configure USB link power settings
2314  * @dev: pointer to the LAN78xx device structure
2315  * @speed: negotiated Ethernet link speed (in Mbps)
2316  *
2317  * This function configures U1/U2 link power management for SuperSpeed
2318  * USB devices based on the current Ethernet link speed. It uses the
2319  * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2320  *
2321  * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2322  *       LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
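 *
 * Policy implemented below: at 1 Gbps, U2 is disabled and U1 enabled;
 * at 100/10 Mbps, both U1 and U2 are enabled.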
2323  *
2324  * Return: 0 on success or a negative error code on failure.
2325  */
2326 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2327 {
2328 	u32 mask, val;
2329 	int ret;
2330 
2331 	/* Only configure USB settings for SuperSpeed devices */
2332 	if (dev->udev->speed != USB_SPEED_SUPER)
2333 		return 0;
2334 
2335 	/* LAN7850 does not support USB 3.x */
2336 	if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2337 		netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2338 		return 0;
2339 	}
2340 
2341 	switch (speed) {
2342 	case SPEED_1000:
2343 		/* Disable U2, enable U1 */
2344 		ret = lan78xx_update_reg(dev, USB_CFG1,
2345 					 USB_CFG1_DEV_U2_INIT_EN_, 0);
2346 		if (ret < 0)
2347 			return ret;
2348 
2349 		return lan78xx_update_reg(dev, USB_CFG1,
2350 					  USB_CFG1_DEV_U1_INIT_EN_,
2351 					  USB_CFG1_DEV_U1_INIT_EN_);
2352 
2353 	case SPEED_100:
2354 	case SPEED_10:
2355 		/* Enable both U1 and U2 */
2356 		mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2357 		val = mask;
2358 		return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2359 
2360 	default:
2361 		netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2362 		return -EINVAL;
2363 	}
2364 }
2365 
2366 /**
2367  * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2368  * @dev: pointer to the LAN78xx device structure
2369  * @tx_pause: enable transmission of pause frames
2370  * @rx_pause: enable reception of pause frames
2371  *
2372  * This function configures the LAN78xx flow control settings by writing
2373  * to the FLOW and FCT_FLOW registers. The pause time is set to the
2374  * maximum allowed value (65535 quanta). FIFO thresholds are selected
2375  * based on USB speed.
2376  *
2377  * The Pause Time field is measured in units of 512-bit times (quanta):
2378  *   - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2379  *   - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2380  *   - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2381  *
2382  * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2383  *   - RXUSED is the number of bytes used in the RX FIFO
2384  *   - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2385  *   - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2386  *   - Both thresholds are encoded in units of 512 bytes (rounded up)
2387  *
2388  * Thresholds differ by USB speed because available USB bandwidth
2389  * affects how fast packets can be drained from the RX FIFO:
2390  *   - USB 3.x (SuperSpeed):
2391  *       FLOW_ON  = 9216 bytes → 18 units
2392  *       FLOW_OFF = 4096 bytes →  8 units
2393  *   - USB 2.0 (High-Speed):
2394  *       FLOW_ON  = 8704 bytes → 17 units
2395  *       FLOW_OFF = 1024 bytes →  2 units
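 *
 * Worked example (illustrative): with the FCT_FLOW layout below,
 * FLOW_CTRL_THRESHOLD(9216, 4096) = (18 << 0) | (8 << 8) = 0x0812 and
 * FLOW_CTRL_THRESHOLD(8704, 1024) = (17 << 0) | (2 << 8) = 0x0211.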
2396  *
2397  * Note: The FCT_FLOW register must be configured before enabling TX pause
2398  *       (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2399  *
2400  * Return: 0 on success or a negative error code on failure.
2401  */
2402 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2403 					 bool tx_pause, bool rx_pause)
2404 {
2405 	/* Use maximum pause time: 65535 quanta (512-bit times) */
2406 	const u32 pause_time_quanta = 65535;
2407 	u32 fct_flow = 0;
2408 	u32 flow = 0;
2409 	int ret;
2410 
2411 	/* Prepare MAC flow control bits */
2412 	if (tx_pause)
2413 		flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2414 
2415 	if (rx_pause)
2416 		flow |= FLOW_CR_RX_FCEN_;
2417 
2418 	/* Select RX FIFO thresholds based on USB speed
2419 	 *
2420 	 * FCT_FLOW layout:
2421 	 *   bits [6:0]   FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2422 	 *   bits [14:8]  FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2423 	 *   thresholds are expressed in units of 512 bytes
2424 	 */
2425 	switch (dev->udev->speed) {
2426 	case USB_SPEED_SUPER:
2427 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2428 		break;
2429 	case USB_SPEED_HIGH:
2430 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2431 		break;
2432 	default:
2433 		netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2434 			    dev->udev->speed);
2435 		return -EINVAL;
2436 	}
2437 
2438 	/* Step 1: Write FIFO thresholds before enabling pause frames */
2439 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2440 	if (ret < 0)
2441 		return ret;
2442 
2443 	/* Step 2: Enable MAC pause functionality */
2444 	return lan78xx_write_reg(dev, FLOW, flow);
2445 }
2446 
2447 static void lan78xx_mac_link_up(struct phylink_config *config,
2448 				struct phy_device *phy,
2449 				unsigned int mode, phy_interface_t interface,
2450 				int speed, int duplex,
2451 				bool tx_pause, bool rx_pause)
2452 {
2453 	struct net_device *net = to_net_dev(config->dev);
2454 	struct lan78xx_net *dev = netdev_priv(net);
2455 	u32 mac_cr = 0;
2456 	int ret;
2457 
2458 	switch (speed) {
2459 	case SPEED_1000:
2460 		mac_cr |= MAC_CR_SPEED_1000_;
2461 		break;
2462 	case SPEED_100:
2463 		mac_cr |= MAC_CR_SPEED_100_;
2464 		break;
2465 	case SPEED_10:
2466 		mac_cr |= MAC_CR_SPEED_10_;
2467 		break;
2468 	default:
2469 		netdev_err(dev->net, "Unsupported speed %d\n", speed);
2470 		return;
2471 	}
2472 
2473 	if (duplex == DUPLEX_FULL)
2474 		mac_cr |= MAC_CR_FULL_DUPLEX_;
2475 
2476 	/* make sure TXEN and RXEN are disabled before reconfiguring MAC */
2477 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ |
2478 				 MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr);
2479 	if (ret < 0)
2480 		goto link_up_fail;
2481 
2482 	ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause);
2483 	if (ret < 0)
2484 		goto link_up_fail;
2485 
2486 	ret = lan78xx_configure_usb(dev, speed);
2487 	if (ret < 0)
2488 		goto link_up_fail;
2489 
2490 	lan78xx_rx_urb_submit_all(dev);
2491 
2492 	ret = lan78xx_flush_rx_fifo(dev);
2493 	if (ret < 0)
2494 		goto link_up_fail;
2495 
2496 	ret = lan78xx_flush_tx_fifo(dev);
2497 	if (ret < 0)
2498 		goto link_up_fail;
2499 
2500 	ret = lan78xx_start_tx_path(dev);
2501 	if (ret < 0)
2502 		goto link_up_fail;
2503 
2504 	ret = lan78xx_start_rx_path(dev);
2505 	if (ret < 0)
2506 		goto link_up_fail;
2507 
2508 	netif_start_queue(net);
2509 
2510 	return;
2511 
2512 link_up_fail:
2513 	netdev_err(dev->net, "Failed to set MAC up with error %pe\n",
2514 		   ERR_PTR(ret));
2515 }
2516 
2517 /**
2518  * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support
2519  * @dev: LAN78xx device
2520  * @enable: true to enable EEE, false to disable
2521  *
2522  * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy
2523  * Efficient Ethernet (EEE) operation. According to current understanding
2524  * of the LAN7800 documentation, this bit can be modified while TX and RX
2525  * are enabled. No explicit requirement was found to disable data paths
2526  * before changing this bit.
2527  *
2528  * Return: 0 on success or a negative error code
2529  */
2530 static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable)
2531 {
2532 	u32 mac_cr = 0;
2533 
2534 	if (enable)
2535 		mac_cr |= MAC_CR_EEE_EN_;
2536 
2537 	return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr);
2538 }
2539 
2540 static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config)
2541 {
2542 	struct net_device *net = to_net_dev(config->dev);
2543 	struct lan78xx_net *dev = netdev_priv(net);
2544 
2545 	lan78xx_mac_eee_enable(dev, false);
2546 }
2547 
2548 static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
2549 				     bool tx_clk_stop)
2550 {
2551 	struct net_device *net = to_net_dev(config->dev);
2552 	struct lan78xx_net *dev = netdev_priv(net);
2553 	int ret;
2554 
2555 	/* Software should only change this field when Energy Efficient
2556 	 * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
2557 	 * EEEEN during probe, and phylink itself guarantees that
2558 	 * mac_disable_tx_lpi() will have been previously called.
2559 	 */
2560 	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer);
2561 	if (ret < 0)
2562 		return ret;
2563 
2564 	return lan78xx_mac_eee_enable(dev, true);
2565 }
2566 
2567 static const struct phylink_mac_ops lan78xx_phylink_mac_ops = {
2568 	.mac_config = lan78xx_mac_config,
2569 	.mac_link_down = lan78xx_mac_link_down,
2570 	.mac_link_up = lan78xx_mac_link_up,
2571 	.mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi,
2572 	.mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi,
2573 };
2574 
2575 /**
2576  * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801
2577  * @dev: LAN78xx device
2578  *
2579  * Use fixed link configuration with 1 Gbps full duplex. This is used in special
2580  * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface
2581  * to a switch without a visible PHY.
2582  *
2583  * Return: 0 on success or a negative error code on failure.
2584  */
2585 static int lan78xx_set_fixed_link(struct lan78xx_net *dev)
2586 {
2587 	static const struct phylink_link_state state = {
2588 		.speed = SPEED_1000,
2589 		.duplex = DUPLEX_FULL,
2590 	};
2591 
2592 	netdev_info(dev->net,
2593 		    "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n");
2594 
2595 	return phylink_set_fixed_link(dev->phylink, &state);
2596 }
2597 
2598 /**
2599  * lan78xx_get_phy() - Probe or register PHY device and set interface mode
2600  * @dev: LAN78xx device structure
2601  *
2602  * This function attempts to find a PHY on the MDIO bus. If no PHY is found
2603  * and the chip is LAN7801, it registers a fixed PHY as fallback. It also
2604  * sets dev->interface based on chip ID and detected PHY type.
2605  *
2606  * Return: a valid PHY device pointer, or ERR_PTR() on failure.
2607  */
2608 static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
2609 {
2610 	struct phy_device *phydev;
2611 
2612 	/* Attempt to locate a PHY on the MDIO bus */
2613 	phydev = phy_find_first(dev->mdiobus);
2614 
2615 	switch (dev->chipid) {
2616 	case ID_REV_CHIP_ID_7801_:
2617 		if (phydev) {
2618 			/* External RGMII PHY detected */
2619 			dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2620 			phydev->is_internal = false;
2621 
2622 			if (!phydev->drv)
2623 				netdev_warn(dev->net,
2624 					    "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
2625 
2626 			return phydev;
2627 		}
2628 
2629 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2630 		/* No PHY found – fall back to a fixed PHY (e.g. KSZ switch board) */
2631 		return NULL;
2632 
2633 	case ID_REV_CHIP_ID_7800_:
2634 	case ID_REV_CHIP_ID_7850_:
2635 		if (!phydev)
2636 			return ERR_PTR(-ENODEV);
2637 
2638 		/* These use internal GMII-connected PHY */
2639 		dev->interface = PHY_INTERFACE_MODE_GMII;
2640 		phydev->is_internal = true;
2641 		return phydev;
2642 
2643 	default:
2644 		netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
2645 		return ERR_PTR(-ENODEV);
2646 	}
2647 }
2648 
2649 /**
2650  * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
2651  * @dev: LAN78xx device
2652  *
2653  * Configure MAC-side registers according to dev->interface, which should be
2654  * set by lan78xx_get_phy().
2655  *
2656  * - For PHY_INTERFACE_MODE_RGMII:
2657  *   Enable MAC-side TXC delay. This mode seems to be used in a special setup
2658  *   without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
2659  *   connected to the KSZ9897 switch, and the link timing is expected to be
2660  *   hardwired (e.g. via strapping or board layout). No devicetree support is
2661  *   assumed here.
2662  *
2663  * - For PHY_INTERFACE_MODE_RGMII_ID:
2664  *   Disable MAC-side delay and rely on the PHY driver to provide delay.
2665  *
2666  * - For GMII, no MAC-specific config is needed.
2667  *
2668  * Return: 0 on success or a negative error code.
2669  */
2670 static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
2671 {
2672 	int ret;
2673 
2674 	switch (dev->interface) {
2675 	case PHY_INTERFACE_MODE_RGMII:
2676 		/* Enable MAC-side TX clock delay */
2677 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2678 					MAC_RGMII_ID_TXC_DELAY_EN_);
2679 		if (ret < 0)
2680 			return ret;
2681 
2682 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2683 		if (ret < 0)
2684 			return ret;
2685 
2686 		ret = lan78xx_update_reg(dev, HW_CFG,
2687 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
2688 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
2689 		if (ret < 0)
2690 			return ret;
2691 
2692 		break;
2693 
2694 	case PHY_INTERFACE_MODE_RGMII_ID:
2695 		/* Disable MAC-side TXC delay, PHY provides it */
2696 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2697 		if (ret < 0)
2698 			return ret;
2699 
2700 		break;
2701 
2702 	case PHY_INTERFACE_MODE_GMII:
2703 		/* No MAC-specific configuration required */
2704 		break;
2705 
2706 	default:
2707 		netdev_warn(dev->net, "Unsupported interface mode: %d\n",
2708 			    dev->interface);
2709 		break;
2710 	}
2711 
2712 	return 0;
2713 }
2714 
2715 /**
2716  * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2717  * @dev: LAN78xx device
2718  * @phydev: PHY device (must be valid)
2719  *
2720  * Reads "microchip,led-modes" property from the PHY's DT node and enables
2721  * the corresponding number of LEDs by writing to HW_CFG.
2722  *
2723  * This helper preserves the original logic, enabling up to 4 LEDs.
2724  * If the property is not present, this function does nothing.
2725  *
2726  * Return: 0 on success or a negative error code.
2727  */
2728 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2729 					  struct phy_device *phydev)
2730 {
2731 	struct device_node *np = phydev->mdio.dev.of_node;
2732 	u32 reg;
2733 	int len, ret;
2734 
2735 	if (!np)
2736 		return 0;
2737 
2738 	len = of_property_count_elems_of_size(np, "microchip,led-modes",
2739 					      sizeof(u32));
2740 	if (len < 0)
2741 		return 0;
2742 
2743 	ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2744 	if (ret < 0)
2745 		return ret;
2746 
2747 	reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2748 		 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2749 
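	/* (len > N) evaluates to 0 or 1, so the multiplications below select
	 * one enable bit per LED mode listed in the DT, without branching.
	 */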
2750 	reg |= (len > 0) * HW_CFG_LED0_EN_ |
2751 	       (len > 1) * HW_CFG_LED1_EN_ |
2752 	       (len > 2) * HW_CFG_LED2_EN_ |
2753 	       (len > 3) * HW_CFG_LED3_EN_;
2754 
2755 	return lan78xx_write_reg(dev, HW_CFG, reg);
2756 }
2757 
2758 static int lan78xx_phylink_setup(struct lan78xx_net *dev)
2759 {
2760 	struct phylink_config *pc = &dev->phylink_config;
2761 	struct phylink *phylink;
2762 
2763 	pc->dev = &dev->net->dev;
2764 	pc->type = PHYLINK_NETDEV;
2765 	pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 |
2766 			       MAC_100 | MAC_1000FD;
2767 	pc->mac_managed_pm = true;
2768 	pc->lpi_capabilities = MAC_100FD | MAC_1000FD;
2769 	/* Default TX LPI (Low Power Idle) request delay count is set to
2770 	 * 50us.
2771 	 *
2772 	 * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204.
2773 	 *
2774 	 * Reasoning:
2775 	 * According to the application note in the LAN7800 documentation, a
2776 	 * zero delay may negatively impact the TX data path’s ability to
2777 	 * support Gigabit operation. A value of 50us is recommended as a
2778 	 * reasonable default when the part operates at Gigabit speeds,
2779 	 * balancing stability and power efficiency in EEE mode. This delay can
2780 	 * be increased based on performance testing, as EEE is designed for
2781 	 * scenarios with mostly idle links and occasional bursts of full
2782 	 * bandwidth transmission. The goal is to ensure reliable Gigabit
2783 	 * performance without overly aggressive power optimization during
2784 	 * inactive periods.
2785 	 */
2786 	pc->lpi_timer_default = 50;
2787 	pc->eee_enabled_default = true;
2788 
2789 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2790 		phy_interface_set_rgmii(pc->supported_interfaces);
2791 	else
2792 		__set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces);
2793 
2794 	memcpy(dev->phylink_config.lpi_interfaces,
2795 	       dev->phylink_config.supported_interfaces,
2796 	       sizeof(dev->phylink_config.lpi_interfaces));
2797 
2798 	phylink = phylink_create(pc, dev->net->dev.fwnode,
2799 				 dev->interface, &lan78xx_phylink_mac_ops);
2800 	if (IS_ERR(phylink))
2801 		return PTR_ERR(phylink);
2802 
2803 	dev->phylink = phylink;
2804 
2805 	return 0;
2806 }
2807 
2808 static void lan78xx_phy_uninit(struct lan78xx_net *dev)
2809 {
2810 	if (dev->phylink) {
2811 		phylink_disconnect_phy(dev->phylink);
2812 		phylink_destroy(dev->phylink);
2813 		dev->phylink = NULL;
2814 	}
2815 }
2816 
2817 static int lan78xx_phy_init(struct lan78xx_net *dev)
2818 {
2819 	struct phy_device *phydev;
2820 	int ret;
2821 
2822 	phydev = lan78xx_get_phy(dev);
2823 	/* phydev can be NULL if no PHY is found and the chip is LAN7801,
2824 	 * which will use a fixed link later.
2825 	 * If an error occurs, return the error code immediately.
2826 	 */
2827 	if (IS_ERR(phydev))
2828 		return PTR_ERR(phydev);
2829 
2830 	ret = lan78xx_phylink_setup(dev);
2831 	if (ret < 0)
2832 		return ret;
2833 
2834 	/* If no PHY is found, set up a fixed link. It is very specific to
2835 	 * the LAN7801 and is used in special cases like EVB-KSZ9897-1 where
2836 	 * LAN7801 acts as a USB-to-Ethernet interface to a switch without
2837 	 * a visible PHY.
2838 	 */
2839 	if (!phydev) {
2840 		ret = lan78xx_set_fixed_link(dev);
2841 		if (ret < 0)
2842 			goto phylink_uninit;
2843 	}
2844 
2845 	ret = lan78xx_mac_prepare_for_phy(dev);
2846 	if (ret < 0)
2847 		goto phylink_uninit;
2848 
2849 	/* if phyirq is not set, use polling mode in phylib */
2850 	if (dev->domain_data.phyirq > 0)
2851 		phydev->irq = dev->domain_data.phyirq;
2852 	else
2853 		phydev->irq = PHY_POLL;
2854 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2855 
2856 	ret = phylink_connect_phy(dev->phylink, phydev);
2857 	if (ret) {
2858 		netdev_err(dev->net, "can't attach PHY to %s, error %pe\n",
2859 			   dev->mdiobus->id, ERR_PTR(ret));
2860 		goto phylink_uninit;
2861 	}
2862 
2863 	ret = lan78xx_configure_leds_from_dt(dev, phydev);
2864 	if (ret < 0)
2865 		goto phylink_uninit;
2866 
2867 	return 0;
2868 
2869 phylink_uninit:
2870 	lan78xx_phy_uninit(dev);
2871 
2872 	return ret;
2873 }
2874 
2875 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2876 {
2877 	bool rxenabled;
2878 	u32 buf;
2879 	int ret;
2880 
2881 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2882 	if (ret < 0)
2883 		return ret;
2884 
2885 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2886 
2887 	if (rxenabled) {
2888 		buf &= ~MAC_RX_RXEN_;
2889 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2890 		if (ret < 0)
2891 			return ret;
2892 	}
2893 
2894 	/* add 4 to size for FCS */
2895 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2896 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2897 
2898 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2899 	if (ret < 0)
2900 		return ret;
2901 
2902 	if (rxenabled) {
2903 		buf |= MAC_RX_RXEN_;
2904 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2905 		if (ret < 0)
2906 			return ret;
2907 	}
2908 
2909 	return 0;
2910 }
2911 
2912 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2913 {
2914 	struct sk_buff *skb;
2915 	unsigned long flags;
2916 	int count = 0;
2917 
2918 	spin_lock_irqsave(&q->lock, flags);
2919 	while (!skb_queue_empty(q)) {
2920 		struct skb_data	*entry;
2921 		struct urb *urb;
2922 		int ret;
2923 
2924 		skb_queue_walk(q, skb) {
2925 			entry = (struct skb_data *)skb->cb;
2926 			if (entry->state != unlink_start)
2927 				goto found;
2928 		}
2929 		break;
2930 found:
2931 		entry->state = unlink_start;
2932 		urb = entry->urb;
2933 
2934 		/* Take a reference on the URB so it cannot be freed while
2935 		 * usb_unlink_urb() runs; otherwise a use-after-free is
2936 		 * possible inside usb_unlink_urb(), since unlinking always
2937 		 * races with the URB's .complete handler (including
2938 		 * defer_bh).
2939 		 */
2940 		usb_get_urb(urb);
2941 		spin_unlock_irqrestore(&q->lock, flags);
2942 		/* during some PM-driven resume scenarios,
2943 		 * these (async) unlinks complete immediately
2944 		 */
2945 		ret = usb_unlink_urb(urb);
2946 		if (ret != -EINPROGRESS && ret != 0)
2947 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2948 		else
2949 			count++;
2950 		usb_put_urb(urb);
2951 		spin_lock_irqsave(&q->lock, flags);
2952 	}
2953 	spin_unlock_irqrestore(&q->lock, flags);
2954 	return count;
2955 }
2956 
2957 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2958 {
2959 	struct lan78xx_net *dev = netdev_priv(netdev);
2960 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2961 	int ret;
2962 
2963 	/* Reject lengths that are an exact multiple of the USB packet size:
	 * the transfer would then end with a zero-length packet, and we do
	 * not want a second (zero-length) read after each MTU-sized packet.
	 */
2964 	if ((max_frame_len % dev->maxpacket) == 0)
2965 		return -EDOM;
2966 
2967 	ret = usb_autopm_get_interface(dev->intf);
2968 	if (ret < 0)
2969 		return ret;
2970 
2971 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2972 	if (ret < 0)
2973 		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2974 			   new_mtu, netdev->mtu, ERR_PTR(ret));
2975 	else
2976 		WRITE_ONCE(netdev->mtu, new_mtu);
2977 
2978 	usb_autopm_put_interface(dev->intf);
2979 
2980 	return ret;
2981 }
2982 
2983 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2984 {
2985 	struct lan78xx_net *dev = netdev_priv(netdev);
2986 	struct sockaddr *addr = p;
2987 	u32 addr_lo, addr_hi;
2988 	int ret;
2989 
2990 	if (netif_running(netdev))
2991 		return -EBUSY;
2992 
2993 	if (!is_valid_ether_addr(addr->sa_data))
2994 		return -EADDRNOTAVAIL;
2995 
2996 	eth_hw_addr_set(netdev, addr->sa_data);
2997 
2998 	addr_lo = netdev->dev_addr[0] |
2999 		  netdev->dev_addr[1] << 8 |
3000 		  netdev->dev_addr[2] << 16 |
3001 		  netdev->dev_addr[3] << 24;
3002 	addr_hi = netdev->dev_addr[4] |
3003 		  netdev->dev_addr[5] << 8;
3004 
3005 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
3006 	if (ret < 0)
3007 		return ret;
3008 
3009 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
3010 	if (ret < 0)
3011 		return ret;
3012 
3013 	/* Added to support MAC address changes */
3014 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
3015 	if (ret < 0)
3016 		return ret;
3017 
3018 	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
3019 }
3020 
3021 /* Enable or disable Rx checksum offload engine */
3022 static int lan78xx_set_features(struct net_device *netdev,
3023 				netdev_features_t features)
3024 {
3025 	struct lan78xx_net *dev = netdev_priv(netdev);
3026 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3027 	unsigned long flags;
3028 
3029 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3030 
3031 	if (features & NETIF_F_RXCSUM) {
3032 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3033 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3034 	} else {
3035 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3036 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3037 	}
3038 
3039 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
3040 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3041 	else
3042 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3043 
3044 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3045 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3046 	else
3047 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3048 
3049 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3050 
3051 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3052 }
3053 
3054 static void lan78xx_deferred_vlan_write(struct work_struct *param)
3055 {
3056 	struct lan78xx_priv *pdata =
3057 			container_of(param, struct lan78xx_priv, set_vlan);
3058 	struct lan78xx_net *dev = pdata->dev;
3059 
3060 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
3061 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
3062 }
3063 
3064 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
3065 				   __be16 proto, u16 vid)
3066 {
3067 	struct lan78xx_net *dev = netdev_priv(netdev);
3068 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3069 	u16 vid_bit_index;
3070 	u16 vid_dword_index;
3071 
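	/* The 4096-entry VLAN table is a bitmap of 128 32-bit words; e.g.
	 * (illustrative) VID 100 maps to word 3, bit 4.
	 */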
3072 	vid_dword_index = (vid >> 5) & 0x7F;
3073 	vid_bit_index = vid & 0x1F;
3074 
3075 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
3076 
3077 	/* defer register writes to a sleepable context */
3078 	schedule_work(&pdata->set_vlan);
3079 
3080 	return 0;
3081 }
3082 
3083 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
3084 				    __be16 proto, u16 vid)
3085 {
3086 	struct lan78xx_net *dev = netdev_priv(netdev);
3087 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3088 	u16 vid_bit_index;
3089 	u16 vid_dword_index;
3090 
3091 	vid_dword_index = (vid >> 5) & 0x7F;
3092 	vid_bit_index = vid & 0x1F;
3093 
3094 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
3095 
3096 	/* defer register writes to a sleepable context */
3097 	schedule_work(&pdata->set_vlan);
3098 
3099 	return 0;
3100 }
3101 
3102 static int lan78xx_init_ltm(struct lan78xx_net *dev)
3103 {
3104 	u32 regs[6] = { 0 };
3105 	int ret;
3106 	u32 buf;
3107 
3108 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
3109 	if (ret < 0)
3110 		goto init_ltm_failed;
3111 
3112 	if (buf & USB_CFG1_LTM_ENABLE_) {
3113 		u8 temp[2];
3114 		/* Get values from EEPROM first */
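		/* The descriptor at 0x3F appears to be {length, word offset}:
		 * a 24-byte payload at byte offset temp[1] * 2 fills the six
		 * LTM registers below.
		 */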
3115 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
3116 			if (temp[0] == 24) {
3117 				ret = lan78xx_read_raw_eeprom(dev,
3118 							      temp[1] * 2,
3119 							      24,
3120 							      (u8 *)regs);
3121 				if (ret < 0)
3122 					return ret;
3123 			}
3124 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
3125 			if (temp[0] == 24) {
3126 				ret = lan78xx_read_raw_otp(dev,
3127 							   temp[1] * 2,
3128 							   24,
3129 							   (u8 *)regs);
3130 				if (ret < 0)
3131 					return ret;
3132 			}
3133 		}
3134 	}
3135 
3136 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
3137 	if (ret < 0)
3138 		goto init_ltm_failed;
3139 
3140 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
3141 	if (ret < 0)
3142 		goto init_ltm_failed;
3143 
3144 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
3145 	if (ret < 0)
3146 		goto init_ltm_failed;
3147 
3148 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
3149 	if (ret < 0)
3150 		goto init_ltm_failed;
3151 
3152 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
3153 	if (ret < 0)
3154 		goto init_ltm_failed;
3155 
3156 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
3157 	if (ret < 0)
3158 		goto init_ltm_failed;
3159 
3160 	return 0;
3161 
3162 init_ltm_failed:
3163 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
3164 	return ret;
3165 }
3166 
3167 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3168 {
3169 	int result = 0;
3170 
3171 	switch (dev->udev->speed) {
3172 	case USB_SPEED_SUPER:
3173 		dev->rx_urb_size = RX_SS_URB_SIZE;
3174 		dev->tx_urb_size = TX_SS_URB_SIZE;
3175 		dev->n_rx_urbs = RX_SS_URB_NUM;
3176 		dev->n_tx_urbs = TX_SS_URB_NUM;
3177 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
3178 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3179 		break;
3180 	case USB_SPEED_HIGH:
3181 		dev->rx_urb_size = RX_HS_URB_SIZE;
3182 		dev->tx_urb_size = TX_HS_URB_SIZE;
3183 		dev->n_rx_urbs = RX_HS_URB_NUM;
3184 		dev->n_tx_urbs = TX_HS_URB_NUM;
3185 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
3186 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3187 		break;
3188 	case USB_SPEED_FULL:
3189 		dev->rx_urb_size = RX_FS_URB_SIZE;
3190 		dev->tx_urb_size = TX_FS_URB_SIZE;
3191 		dev->n_rx_urbs = RX_FS_URB_NUM;
3192 		dev->n_tx_urbs = TX_FS_URB_NUM;
3193 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
3194 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3195 		break;
3196 	default:
3197 		netdev_warn(dev->net, "USB bus speed not supported\n");
3198 		result = -EIO;
3199 		break;
3200 	}
3201 
3202 	return result;
3203 }
3204 
3205 static int lan78xx_reset(struct lan78xx_net *dev)
3206 {
3207 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3208 	unsigned long timeout;
3209 	int ret;
3210 	u32 buf;
3211 
3212 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3213 	if (ret < 0)
3214 		return ret;
3215 
3216 	buf |= HW_CFG_LRST_;
3217 
3218 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3219 	if (ret < 0)
3220 		return ret;
3221 
3222 	timeout = jiffies + HZ;
3223 	do {
3224 		mdelay(1);
3225 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3226 		if (ret < 0)
3227 			return ret;
3228 
3229 		if (time_after(jiffies, timeout)) {
3230 			netdev_warn(dev->net,
3231 				    "timeout on completion of LiteReset");
3232 			ret = -ETIMEDOUT;
3233 			return ret;
3234 		}
3235 	} while (buf & HW_CFG_LRST_);
3236 
3237 	ret = lan78xx_init_mac_address(dev);
3238 	if (ret < 0)
3239 		return ret;
3240 
3241 	/* save DEVID for later usage */
3242 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
3243 	if (ret < 0)
3244 		return ret;
3245 
3246 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
3247 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
3248 
3249 	/* Respond to the IN token with a NAK */
3250 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3251 	if (ret < 0)
3252 		return ret;
3253 
3254 	buf |= USB_CFG_BIR_;
3255 
3256 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3257 	if (ret < 0)
3258 		return ret;
3259 
3260 	/* Init LTM */
3261 	ret = lan78xx_init_ltm(dev);
3262 	if (ret < 0)
3263 		return ret;
3264 
3265 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
3266 	if (ret < 0)
3267 		return ret;
3268 
3269 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
3270 	if (ret < 0)
3271 		return ret;
3272 
3273 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3274 	if (ret < 0)
3275 		return ret;
3276 
3277 	buf |= HW_CFG_MEF_;
3278 	buf |= HW_CFG_CLK125_EN_;
3279 	buf |= HW_CFG_REFCLK25_EN_;
3280 
3281 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3282 	if (ret < 0)
3283 		return ret;
3284 
3285 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3286 	if (ret < 0)
3287 		return ret;
3288 
3289 	buf |= USB_CFG_BCE_;
3290 
3291 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3292 	if (ret < 0)
3293 		return ret;
3294 
3295 	/* set FIFO sizes */
3296 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
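	/* e.g. (illustrative) a 12 KiB FIFO gives (12288 - 512) / 512 = 23,
	 * presumably the FIFO end offset in 512-byte units.
	 */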
3297 
3298 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
3299 	if (ret < 0)
3300 		return ret;
3301 
3302 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
3303 
3304 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
3305 	if (ret < 0)
3306 		return ret;
3307 
3308 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3309 	if (ret < 0)
3310 		return ret;
3311 
3312 	ret = lan78xx_write_reg(dev, FLOW, 0);
3313 	if (ret < 0)
3314 		return ret;
3315 
3316 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3317 	if (ret < 0)
3318 		return ret;
3319 
3320 	/* Don't need rfe_ctl_lock during initialisation */
3321 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3322 	if (ret < 0)
3323 		return ret;
3324 
3325 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3326 
3327 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3328 	if (ret < 0)
3329 		return ret;
3330 
3331 	/* Enable or disable checksum offload engines */
3332 	ret = lan78xx_set_features(dev->net, dev->net->features);
3333 	if (ret < 0)
3334 		return ret;
3335 
3336 	lan78xx_set_multicast(dev->net);
3337 
3338 	/* reset PHY */
3339 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3340 	if (ret < 0)
3341 		return ret;
3342 
3343 	buf |= PMT_CTL_PHY_RST_;
3344 
3345 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3346 	if (ret < 0)
3347 		return ret;
3348 
3349 	timeout = jiffies + HZ;
3350 	do {
3351 		mdelay(1);
3352 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3353 		if (ret < 0)
3354 			return ret;
3355 
3356 		if (time_after(jiffies, timeout)) {
3357 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3358 			ret = -ETIMEDOUT;
3359 			return ret;
3360 		}
3361 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3362 
3363 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3364 	if (ret < 0)
3365 		return ret;
3366 
3367 	buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_);
3368 
3369 	/* LAN7801 only has RGMII mode */
3370 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
3371 		buf &= ~MAC_CR_GMII_EN_;
3372 
3373 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3374 	if (ret < 0)
3375 		return ret;
3376 
3377 	ret = lan78xx_set_rx_max_frame_length(dev,
3378 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3379 
3380 	return ret;
3381 }
3382 
3383 static void lan78xx_init_stats(struct lan78xx_net *dev)
3384 {
3385 	u32 *p;
3386 	int i;
3387 
3388 	/* Initialize rollover thresholds for the stats update logic:
3389 	 * most counters are 20 bits wide, some are 32 bits wide.
3390 	 */
3391 	p = (u32 *)&dev->stats.rollover_max;
3392 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3393 		p[i] = 0xFFFFF;
3394 
3395 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3396 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3397 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3398 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3399 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3400 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3401 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3402 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3403 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3404 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3405 
3406 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3407 }
3408 
3409 static int lan78xx_open(struct net_device *net)
3410 {
3411 	struct lan78xx_net *dev = netdev_priv(net);
3412 	int ret;
3413 
3414 	netif_dbg(dev, ifup, dev->net, "open device");
3415 
3416 	ret = usb_autopm_get_interface(dev->intf);
3417 	if (ret < 0)
3418 		return ret;
3419 
3420 	mutex_lock(&dev->dev_mutex);
3421 
3422 	lan78xx_init_stats(dev);
3423 
3424 	napi_enable(&dev->napi);
3425 
3426 	set_bit(EVENT_DEV_OPEN, &dev->flags);
3427 
3428 	/* for Link Check */
3429 	if (dev->urb_intr) {
3430 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3431 		if (ret < 0) {
3432 			netif_err(dev, ifup, dev->net,
3433 				  "intr submit %d\n", ret);
3434 			goto done;
3435 		}
3436 	}
3437 
3438 	phylink_start(dev->phylink);
3439 
3440 done:
3441 	mutex_unlock(&dev->dev_mutex);
3442 
3443 	if (ret < 0)
3444 		usb_autopm_put_interface(dev->intf);
3445 
3446 	return ret;
3447 }
3448 
3449 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3450 {
3451 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3452 	DECLARE_WAITQUEUE(wait, current);
3453 	int temp;
3454 
3455 	/* ensure there are no more active urbs */
3456 	add_wait_queue(&unlink_wakeup, &wait);
3457 	set_current_state(TASK_UNINTERRUPTIBLE);
3458 	dev->wait = &unlink_wakeup;
3459 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3460 
3461 	/* maybe wait for deletions to finish. */
3462 	while (!skb_queue_empty(&dev->rxq) ||
3463 	       !skb_queue_empty(&dev->txq)) {
3464 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3465 		set_current_state(TASK_UNINTERRUPTIBLE);
3466 		netif_dbg(dev, ifdown, dev->net,
3467 			  "waited for %d urb completions", temp);
3468 	}
3469 	set_current_state(TASK_RUNNING);
3470 	dev->wait = NULL;
3471 	remove_wait_queue(&unlink_wakeup, &wait);
3472 
3473 	/* empty Rx done, Rx overflow and Tx pend queues
3474 	 */
3475 	while (!skb_queue_empty(&dev->rxq_done)) {
3476 		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3477 
3478 		lan78xx_release_rx_buf(dev, skb);
3479 	}
3480 
3481 	skb_queue_purge(&dev->rxq_overflow);
3482 	skb_queue_purge(&dev->txq_pend);
3483 }
3484 
3485 static int lan78xx_stop(struct net_device *net)
3486 {
3487 	struct lan78xx_net *dev = netdev_priv(net);
3488 
3489 	netif_dbg(dev, ifup, dev->net, "stop device");
3490 
3491 	mutex_lock(&dev->dev_mutex);
3492 
3493 	if (timer_pending(&dev->stat_monitor))
3494 		timer_delete_sync(&dev->stat_monitor);
3495 
3496 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3497 	napi_disable(&dev->napi);
3498 
3499 	lan78xx_terminate_urbs(dev);
3500 
3501 	netif_info(dev, ifdown, dev->net,
3502 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3503 		   net->stats.rx_packets, net->stats.tx_packets,
3504 		   net->stats.rx_errors, net->stats.tx_errors);
3505 
3506 	phylink_stop(dev->phylink);
3507 
3508 	usb_kill_urb(dev->urb_intr);
3509 
3510 	/* deferred work (task, timer, softirq) must also stop.
3511 	 * can't flush_scheduled_work() until we drop rtnl (later),
3512 	 * else workers could deadlock; so make workers a NOP.
3513 	 */
3514 	clear_bit(EVENT_TX_HALT, &dev->flags);
3515 	clear_bit(EVENT_RX_HALT, &dev->flags);
3516 	clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
3517 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3518 
3519 	cancel_delayed_work_sync(&dev->wq);
3520 
3521 	usb_autopm_put_interface(dev->intf);
3522 
3523 	mutex_unlock(&dev->dev_mutex);
3524 
3525 	return 0;
3526 }
3527 
3528 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3529 			       struct sk_buff_head *list, enum skb_state state)
3530 {
3531 	unsigned long flags;
3532 	enum skb_state old_state;
3533 	struct skb_data *entry = (struct skb_data *)skb->cb;
3534 
3535 	spin_lock_irqsave(&list->lock, flags);
3536 	old_state = entry->state;
3537 	entry->state = state;
3538 
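	/* Hand-over-hand locking: the IRQ flags saved when taking list->lock
	 * are restored only when rxq_done.lock is released below.
	 */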
3539 	__skb_unlink(skb, list);
3540 	spin_unlock(&list->lock);
3541 	spin_lock(&dev->rxq_done.lock);
3542 
3543 	__skb_queue_tail(&dev->rxq_done, skb);
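	/* Kick NAPI only for the first queued entry; later entries are
	 * drained by the poll that is already scheduled.
	 */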
3544 	if (skb_queue_len(&dev->rxq_done) == 1)
3545 		napi_schedule(&dev->napi);
3546 
3547 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3548 
3549 	return old_state;
3550 }
3551 
3552 static void tx_complete(struct urb *urb)
3553 {
3554 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3555 	struct skb_data *entry = (struct skb_data *)skb->cb;
3556 	struct lan78xx_net *dev = entry->dev;
3557 
3558 	if (urb->status == 0) {
3559 		dev->net->stats.tx_packets += entry->num_of_packet;
3560 		dev->net->stats.tx_bytes += entry->length;
3561 	} else {
3562 		dev->net->stats.tx_errors += entry->num_of_packet;
3563 
3564 		switch (urb->status) {
3565 		case -EPIPE:
3566 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3567 			break;
3568 
3569 		/* software-driven interface shutdown */
3570 		case -ECONNRESET:
3571 		case -ESHUTDOWN:
3572 			netif_dbg(dev, tx_err, dev->net,
3573 				  "tx err interface gone %d\n",
3574 				  entry->urb->status);
3575 			break;
3576 
3577 		case -EPROTO:
3578 		case -ETIME:
3579 		case -EILSEQ:
3580 			netif_stop_queue(dev->net);
3581 			netif_dbg(dev, tx_err, dev->net,
3582 				  "tx err queue stopped %d\n",
3583 				  entry->urb->status);
3584 			break;
3585 		default:
3586 			netif_dbg(dev, tx_err, dev->net,
3587 				  "unknown tx err %d\n",
3588 				  entry->urb->status);
3589 			break;
3590 		}
3591 	}
3592 
3593 	usb_autopm_put_interface_async(dev->intf);
3594 
3595 	skb_unlink(skb, &dev->txq);
3596 
3597 	lan78xx_release_tx_buf(dev, skb);
3598 
3599 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3600 	 */
3601 	if (skb_queue_empty(&dev->txq) &&
3602 	    !skb_queue_empty(&dev->txq_pend))
3603 		napi_schedule(&dev->napi);
3604 }
3605 
3606 static void lan78xx_queue_skb(struct sk_buff_head *list,
3607 			      struct sk_buff *newsk, enum skb_state state)
3608 {
3609 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3610 
3611 	__skb_queue_tail(list, newsk);
3612 	entry->state = state;
3613 }
3614 
3615 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3616 {
3617 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3618 }
3619 
3620 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3621 {
3622 	return dev->tx_pend_data_len;
3623 }
3624 
3625 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3626 				    struct sk_buff *skb,
3627 				    unsigned int *tx_pend_data_len)
3628 {
3629 	unsigned long flags;
3630 
3631 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3632 
3633 	__skb_queue_tail(&dev->txq_pend, skb);
3634 
3635 	dev->tx_pend_data_len += skb->len;
3636 	*tx_pend_data_len = dev->tx_pend_data_len;
3637 
3638 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3639 }
3640 
3641 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3642 					 struct sk_buff *skb,
3643 					 unsigned int *tx_pend_data_len)
3644 {
3645 	unsigned long flags;
3646 
3647 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3648 
3649 	__skb_queue_head(&dev->txq_pend, skb);
3650 
3651 	dev->tx_pend_data_len += skb->len;
3652 	*tx_pend_data_len = dev->tx_pend_data_len;
3653 
3654 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3655 }
3656 
3657 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3658 				    struct sk_buff **skb,
3659 				    unsigned int *tx_pend_data_len)
3660 {
3661 	unsigned long flags;
3662 
3663 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3664 
3665 	*skb = __skb_dequeue(&dev->txq_pend);
3666 	if (*skb)
3667 		dev->tx_pend_data_len -= (*skb)->len;
3668 	*tx_pend_data_len = dev->tx_pend_data_len;
3669 
3670 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3671 }
3672 
3673 static netdev_tx_t
3674 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3675 {
3676 	struct lan78xx_net *dev = netdev_priv(net);
3677 	unsigned int tx_pend_data_len;
3678 
3679 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3680 		schedule_delayed_work(&dev->wq, 0);
3681 
3682 	skb_tx_timestamp(skb);
3683 
3684 	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3685 
3686 	/* Set up a Tx URB if none is in progress */
3687 
3688 	if (skb_queue_empty(&dev->txq))
3689 		napi_schedule(&dev->napi);
3690 
3691 	/* Stop stack Tx queue if we have enough data to fill
3692 	 * all the free Tx URBs.
3693 	 */
3694 	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3695 		netif_stop_queue(net);
3696 
3697 		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3698 			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3699 
3700 		/* Kick off transmission of pending data */
3701 
3702 		if (!skb_queue_empty(&dev->txq_free))
3703 			napi_schedule(&dev->napi);
3704 	}
3705 
3706 	return NETDEV_TX_OK;
3707 }
3708 
3709 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3710 {
3711 	struct lan78xx_priv *pdata = NULL;
3712 	int ret;
3713 	int i;
3714 
3715 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3716 
3717 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3718 	if (!pdata) {
3719 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3720 		return -ENOMEM;
3721 	}
3722 
3723 	pdata->dev = dev;
3724 
3725 	spin_lock_init(&pdata->rfe_ctl_lock);
3726 	mutex_init(&pdata->dataport_mutex);
3727 
3728 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3729 
3730 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3731 		pdata->vlan_table[i] = 0;
3732 
3733 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3734 
3735 	dev->net->features = 0;
3736 
3737 	if (DEFAULT_TX_CSUM_ENABLE)
3738 		dev->net->features |= NETIF_F_HW_CSUM;
3739 
3740 	if (DEFAULT_RX_CSUM_ENABLE)
3741 		dev->net->features |= NETIF_F_RXCSUM;
3742 
3743 	if (DEFAULT_TSO_CSUM_ENABLE)
3744 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3745 
3746 	if (DEFAULT_VLAN_RX_OFFLOAD)
3747 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3748 
3749 	if (DEFAULT_VLAN_FILTER_ENABLE)
3750 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3751 
3752 	dev->net->hw_features = dev->net->features;
3753 
3754 	ret = lan78xx_setup_irq_domain(dev);
3755 	if (ret < 0) {
3756 		netdev_warn(dev->net,
3757 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3758 		goto out1;
3759 	}
3760 
3761 	/* Init all registers */
3762 	ret = lan78xx_reset(dev);
3763 	if (ret) {
3764 		netdev_warn(dev->net, "Registers INIT FAILED....");
3765 		goto out2;
3766 	}
3767 
3768 	ret = lan78xx_mdio_init(dev);
3769 	if (ret) {
3770 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3771 		goto out2;
3772 	}
3773 
3774 	dev->net->flags |= IFF_MULTICAST;
3775 
3776 	pdata->wol = WAKE_MAGIC;
3777 
3778 	return ret;
3779 
3780 out2:
3781 	lan78xx_remove_irq_domain(dev);
3782 
3783 out1:
3784 	netdev_warn(dev->net, "Bind routine FAILED");
3785 	cancel_work_sync(&pdata->set_multicast);
3786 	cancel_work_sync(&pdata->set_vlan);
3787 	kfree(pdata);
3788 	return ret;
3789 }
3790 
3791 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3792 {
3793 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3794 
3795 	lan78xx_remove_irq_domain(dev);
3796 
3797 	lan78xx_remove_mdio(dev);
3798 
3799 	if (pdata) {
3800 		cancel_work_sync(&pdata->set_multicast);
3801 		cancel_work_sync(&pdata->set_vlan);
3802 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3803 		kfree(pdata);
3804 		pdata = NULL;
3805 		dev->data[0] = 0;
3806 	}
3807 }
3808 
3809 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3810 				    struct sk_buff *skb,
3811 				    u32 rx_cmd_a, u32 rx_cmd_b)
3812 {
3813 	/* HW Checksum offload appears to be flawed if used when not stripping
3814 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3815 	 */
3816 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3817 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3818 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3819 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3820 		skb->ip_summed = CHECKSUM_NONE;
3821 	} else {
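		/* The checksum field of rx_cmd_b carries a 16-bit checksum
		 * computed by hardware over the frame; report it to the
		 * stack as CHECKSUM_COMPLETE.
		 */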
3822 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3823 		skb->ip_summed = CHECKSUM_COMPLETE;
3824 	}
3825 }
3826 
3827 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3828 				    struct sk_buff *skb,
3829 				    u32 rx_cmd_a, u32 rx_cmd_b)
3830 {
3831 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3832 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3833 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3834 				       (rx_cmd_b & 0xffff));
3835 }
3836 
3837 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3838 {
3839 	dev->net->stats.rx_packets++;
3840 	dev->net->stats.rx_bytes += skb->len;
3841 
3842 	skb->protocol = eth_type_trans(skb, dev->net);
3843 
3844 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3845 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3846 	memset(skb->cb, 0, sizeof(struct skb_data));
3847 
3848 	if (skb_defer_rx_timestamp(skb))
3849 		return;
3850 
3851 	napi_gro_receive(&dev->napi, skb);
3852 }
3853 
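/* Layout of each received frame inside a bulk-in URB buffer, as parsed by
 * lan78xx_rx() below (sizes inferred from the code):
 *
 *   RX_CMD_A (le32) | RX_CMD_B (le32) | RX_CMD_C (le16) | frame + FCS | pad
 *
 * The pad bytes realign the next RX_CMD_A on a 4-byte boundary; RXW_PADDING
 * accounts for the 2-byte RX_CMD_C word that precedes the frame data.
 */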
3854 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3855 		      int budget, int *work_done)
3856 {
3857 	if (skb->len < RX_SKB_MIN_LEN)
3858 		return 0;
3859 
3860 	/* Extract frames from the URB buffer and pass each one to
3861 	 * the stack in a new NAPI SKB.
3862 	 */
3863 	while (skb->len > 0) {
3864 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3865 		u16 rx_cmd_c;
3866 		unsigned char *packet;
3867 
3868 		rx_cmd_a = get_unaligned_le32(skb->data);
3869 		skb_pull(skb, sizeof(rx_cmd_a));
3870 
3871 		rx_cmd_b = get_unaligned_le32(skb->data);
3872 		skb_pull(skb, sizeof(rx_cmd_b));
3873 
3874 		rx_cmd_c = get_unaligned_le16(skb->data);
3875 		skb_pull(skb, sizeof(rx_cmd_c));
3876 
3877 		packet = skb->data;
3878 
3879 		/* get the packet length */
3880 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3881 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3882 
3883 		if (unlikely(size > skb->len)) {
3884 			netif_dbg(dev, rx_err, dev->net,
3885 				  "size err rx_cmd_a=0x%08x\n",
3886 				  rx_cmd_a);
3887 			return 0;
3888 		}
3889 
3890 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3891 			netif_dbg(dev, rx_err, dev->net,
3892 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3893 		} else {
3894 			u32 frame_len;
3895 			struct sk_buff *skb2;
3896 
3897 			if (unlikely(size < ETH_FCS_LEN)) {
3898 				netif_dbg(dev, rx_err, dev->net,
3899 					  "size err rx_cmd_a=0x%08x\n",
3900 					  rx_cmd_a);
3901 				return 0;
3902 			}
3903 
3904 			frame_len = size - ETH_FCS_LEN;
3905 
3906 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3907 			if (!skb2)
3908 				return 0;
3909 
3910 			memcpy(skb2->data, packet, frame_len);
3911 
3912 			skb_put(skb2, frame_len);
3913 
3914 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3915 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3916 
3917 			/* Processing of the URB buffer must complete once
3918 			 * it has started. If the NAPI work budget is exhausted
3919 			 * while frames remain, they are added to the overflow
3920 			 * queue for delivery in the next NAPI polling cycle.
3921 			 */
3922 			if (*work_done < budget) {
3923 				lan78xx_skb_return(dev, skb2);
3924 				++(*work_done);
3925 			} else {
3926 				skb_queue_tail(&dev->rxq_overflow, skb2);
3927 			}
3928 		}
3929 
3930 		skb_pull(skb, size);
3931 
3932 		/* skip padding bytes before the next frame starts */
3933 		if (skb->len)
3934 			skb_pull(skb, align_count);
3935 	}
3936 
3937 	return 1;
3938 }
3939 
3940 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3941 			      int budget, int *work_done)
3942 {
3943 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3944 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3945 		dev->net->stats.rx_errors++;
3946 	}
3947 }
3948 
3949 static void rx_complete(struct urb *urb)
3950 {
3951 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3952 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3953 	struct lan78xx_net *dev = entry->dev;
3954 	int urb_status = urb->status;
3955 	enum skb_state state;
3956 
3957 	netif_dbg(dev, rx_status, dev->net,
3958 		  "rx done: status %d", urb->status);
3959 
3960 	skb_put(skb, urb->actual_length);
3961 	state = rx_done;
3962 
3963 	if (urb != entry->urb)
3964 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
3965 
3966 	switch (urb_status) {
3967 	case 0:
3968 		if (skb->len < RX_SKB_MIN_LEN) {
3969 			state = rx_cleanup;
3970 			dev->net->stats.rx_errors++;
3971 			dev->net->stats.rx_length_errors++;
3972 			netif_dbg(dev, rx_err, dev->net,
3973 				  "rx length %d\n", skb->len);
3974 		}
3975 		usb_mark_last_busy(dev->udev);
3976 		break;
3977 	case -EPIPE:
3978 		dev->net->stats.rx_errors++;
3979 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3980 		fallthrough;
3981 	case -ECONNRESET:				/* async unlink */
3982 	case -ESHUTDOWN:				/* hardware gone */
3983 		netif_dbg(dev, ifdown, dev->net,
3984 			  "rx shutdown, code %d\n", urb_status);
3985 		state = rx_cleanup;
3986 		break;
3987 	case -EPROTO:
3988 	case -ETIME:
3989 	case -EILSEQ:
3990 		dev->net->stats.rx_errors++;
3991 		state = rx_cleanup;
3992 		break;
3993 
3994 	/* data overrun ... flush fifo? */
3995 	case -EOVERFLOW:
3996 		dev->net->stats.rx_over_errors++;
3997 		fallthrough;
3998 
3999 	default:
4000 		state = rx_cleanup;
4001 		dev->net->stats.rx_errors++;
4002 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
4003 		break;
4004 	}
4005 
4006 	state = defer_bh(dev, skb, &dev->rxq, state);
4007 }
4008 
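/* Fill and submit a bulk-in URB for the given Rx buffer. The URB is only
 * submitted while the device is present and running and no Rx halt or
 * autosuspend is pending; on any failure the buffer is returned to the
 * free pool and a non-zero error (e.g. -ENOLINK) is reported.
 */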
4009 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
4010 {
4011 	struct skb_data	*entry = (struct skb_data *)skb->cb;
4012 	size_t size = dev->rx_urb_size;
4013 	struct urb *urb = entry->urb;
4014 	unsigned long lockflags;
4015 	int ret = 0;
4016 
4017 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
4018 			  skb->data, size, rx_complete, skb);
4019 
4020 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
4021 
4022 	if (netif_device_present(dev->net) &&
4023 	    netif_running(dev->net) &&
4024 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
4025 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4026 		ret = usb_submit_urb(urb, flags);
4027 		switch (ret) {
4028 		case 0:
4029 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
4030 			break;
4031 		case -EPIPE:
4032 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
4033 			break;
4034 		case -ENODEV:
4035 		case -ENOENT:
4036 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
4037 			netif_device_detach(dev->net);
4038 			break;
4039 		case -EHOSTUNREACH:
4040 			ret = -ENOLINK;
4041 			napi_schedule(&dev->napi);
4042 			break;
4043 		default:
4044 			netif_dbg(dev, rx_err, dev->net,
4045 				  "rx submit, %d\n", ret);
4046 			napi_schedule(&dev->napi);
4047 			break;
4048 		}
4049 	} else {
4050 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
4051 		ret = -ENOLINK;
4052 	}
4053 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
4054 
4055 	if (ret)
4056 		lan78xx_release_rx_buf(dev, skb);
4057 
4058 	return ret;
4059 }
4060 
4061 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4062 {
4063 	struct sk_buff *rx_buf;
4064 
4065 	/* Ensure the maximum number of Rx URBs is submitted
4066 	 */
4067 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4068 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4069 			break;
4070 	}
4071 }
4072 
4073 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
4074 				    struct sk_buff *rx_buf)
4075 {
4076 	/* reset SKB data pointers */
4077 
4078 	rx_buf->data = rx_buf->head;
4079 	skb_reset_tail_pointer(rx_buf);
4080 	rx_buf->len = 0;
4081 	rx_buf->data_len = 0;
4082 
4083 	rx_submit(dev, rx_buf, GFP_ATOMIC);
4084 }
4085 
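/* Build the two little-endian command words that prefix each frame in a
 * Tx URB: TX_CMD_A carries the frame length, FCS insertion and, when
 * requested, checksum offload (IPE/TPE) and LSO enable; TX_CMD_B carries
 * the LSO MSS and the VLAN tag to be inserted. The words may land on an
 * unaligned address inside the URB buffer, hence put_unaligned_le32().
 */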
4086 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
4087 {
4088 	u32 tx_cmd_a;
4089 	u32 tx_cmd_b;
4090 
4091 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
4092 
4093 	if (skb->ip_summed == CHECKSUM_PARTIAL)
4094 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
4095 
4096 	tx_cmd_b = 0;
4097 	if (skb_is_gso(skb)) {
4098 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
4099 
4100 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
4101 
4102 		tx_cmd_a |= TX_CMD_A_LSO_;
4103 	}
4104 
4105 	if (skb_vlan_tag_present(skb)) {
4106 		tx_cmd_a |= TX_CMD_A_IVTG_;
4107 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
4108 	}
4109 
4110 	put_unaligned_le32(tx_cmd_a, buffer);
4111 	put_unaligned_le32(tx_cmd_b, buffer + 4);
4112 }
4113 
4114 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
4115 					    struct sk_buff *tx_buf)
4116 {
4117 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
4118 	int remain = dev->tx_urb_size;
4119 	u8 *tx_data = tx_buf->data;
4120 	u32 urb_len = 0;
4121 
4122 	entry->num_of_packet = 0;
4123 	entry->length = 0;
4124 
4125 	/* Work through the pending SKBs and copy the data of each SKB into
4126 	 * the URB buffer if there is room for all the SKB data.
4127 	 *
4128 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled).
4129 	 */
4130 	while (remain >= TX_SKB_MIN_LEN) {
4131 		unsigned int pending_bytes;
4132 		unsigned int align_bytes;
4133 		struct sk_buff *skb;
4134 		unsigned int len;
4135 
4136 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
4137 
4138 		if (!skb)
4139 			break;
4140 
4141 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
4142 			      TX_ALIGNMENT;
4143 		len = align_bytes + TX_CMD_LEN + skb->len;
4144 		if (len > remain) {
4145 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
4146 			break;
4147 		}
4148 
4149 		tx_data += align_bytes;
4150 
4151 		lan78xx_fill_tx_cmd_words(skb, tx_data);
4152 		tx_data += TX_CMD_LEN;
4153 
4154 		len = skb->len;
4155 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
4156 			struct net_device_stats *stats = &dev->net->stats;
4157 
4158 			stats->tx_dropped++;
4159 			dev_kfree_skb_any(skb);
4160 			tx_data -= TX_CMD_LEN;
4161 			continue;
4162 		}
4163 
4164 		tx_data += len;
4165 		entry->length += len;
4166 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
4167 
4168 		dev_kfree_skb_any(skb);
4169 
4170 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
4171 
4172 		remain = dev->tx_urb_size - urb_len;
4173 	}
4174 
4175 	skb_put(tx_buf, urb_len);
4176 
4177 	return entry;
4178 }
4179 
4180 static void lan78xx_tx_bh(struct lan78xx_net *dev)
4181 {
4182 	int ret;
4183 
4184 	/* Start the stack Tx queue if it was stopped
4185 	 */
4186 	netif_tx_lock(dev->net);
4187 	if (netif_queue_stopped(dev->net)) {
4188 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
4189 			netif_wake_queue(dev->net);
4190 	}
4191 	netif_tx_unlock(dev->net);
4192 
4193 	/* Go through the Tx pending queue and set up URBs to transfer
4194 	 * the data to the device. Stop if no more pending data or URBs,
4195 	 * or if an error occurs when a URB is submitted.
4196 	 */
4197 	do {
4198 		struct skb_data *entry;
4199 		struct sk_buff *tx_buf;
4200 		unsigned long flags;
4201 
4202 		if (skb_queue_empty(&dev->txq_pend))
4203 			break;
4204 
4205 		tx_buf = lan78xx_get_tx_buf(dev);
4206 		if (!tx_buf)
4207 			break;
4208 
4209 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
4210 
4211 		spin_lock_irqsave(&dev->txq.lock, flags);
4212 		ret = usb_autopm_get_interface_async(dev->intf);
4213 		if (ret < 0) {
4214 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4215 			goto out;
4216 		}
4217 
4218 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
4219 				  tx_buf->data, tx_buf->len, tx_complete,
4220 				  tx_buf);
4221 
4222 		if (tx_buf->len % dev->maxpacket == 0) {
4223 			/* send USB_ZERO_PACKET */
4224 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
4225 		}
4226 
4227 #ifdef CONFIG_PM
4228 		/* if device is asleep stop outgoing packet processing */
4229 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4230 			usb_anchor_urb(entry->urb, &dev->deferred);
4231 			netif_stop_queue(dev->net);
4232 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4233 			netdev_dbg(dev->net,
4234 				   "Delaying transmission for resumption\n");
4235 			return;
4236 		}
4237 #endif
4238 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
4239 		switch (ret) {
4240 		case 0:
4241 			netif_trans_update(dev->net);
4242 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
4243 			break;
4244 		case -EPIPE:
4245 			netif_stop_queue(dev->net);
4246 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4247 			usb_autopm_put_interface_async(dev->intf);
4248 			break;
4249 		case -ENODEV:
4250 		case -ENOENT:
4251 			netif_dbg(dev, tx_err, dev->net,
4252 				  "tx submit urb err %d (disconnected?)", ret);
4253 			netif_device_detach(dev->net);
4254 			break;
4255 		default:
4256 			usb_autopm_put_interface_async(dev->intf);
4257 			netif_dbg(dev, tx_err, dev->net,
4258 				  "tx submit urb err %d\n", ret);
4259 			break;
4260 		}
4261 
4262 		spin_unlock_irqrestore(&dev->txq.lock, flags);
4263 
4264 		if (ret) {
4265 			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
4266 out:
4267 			dev->net->stats.tx_dropped += entry->num_of_packet;
4268 			lan78xx_release_tx_buf(dev, tx_buf);
4269 		}
4270 	} while (ret == 0);
4271 }
4272 
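/* NAPI bottom half: deliver any frames left over from the previous cycle,
 * then process a snapshot of completed Rx URBs within the budget,
 * resubmitting each URB as it is handled, and finally refill the Rx ring
 * and kick the Tx path. Returns the number of frames passed to the stack.
 */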
4273 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
4274 {
4275 	struct sk_buff_head done;
4276 	struct sk_buff *rx_buf;
4277 	struct skb_data *entry;
4278 	unsigned long flags;
4279 	int work_done = 0;
4280 
4281 	/* Pass frames received in the last NAPI cycle before
4282 	 * working on newly completed URBs.
4283 	 */
4284 	while (!skb_queue_empty(&dev->rxq_overflow)) {
4285 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
4286 		++work_done;
4287 	}
4288 
4289 	/* Take a snapshot of the done queue and move items to a
4290 	 * temporary queue. Rx URB completions will continue to add
4291 	 * to the done queue.
4292 	 */
4293 	__skb_queue_head_init(&done);
4294 
4295 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4296 	skb_queue_splice_init(&dev->rxq_done, &done);
4297 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4298 
4299 	/* Extract receive frames from completed URBs and
4300 	 * pass them to the stack. Re-submit each completed URB.
4301 	 */
4302 	while ((work_done < budget) &&
4303 	       (rx_buf = __skb_dequeue(&done))) {
4304 		entry = (struct skb_data *)(rx_buf->cb);
4305 		switch (entry->state) {
4306 		case rx_done:
4307 			rx_process(dev, rx_buf, budget, &work_done);
4308 			break;
4309 		case rx_cleanup:
4310 			break;
4311 		default:
4312 			netdev_dbg(dev->net, "rx buf state %d\n",
4313 				   entry->state);
4314 			break;
4315 		}
4316 
4317 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4318 	}
4319 
4320 	/* If budget was consumed before processing all the URBs, put them
4321 	 * back on the front of the done queue. They will be first to be
4322 	 * processed in the next NAPI cycle.
4323 	 */
4324 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4325 	skb_queue_splice(&done, &dev->rxq_done);
4326 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4327 
4328 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4329 		/* reset update timer delta */
4330 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4331 			dev->delta = 1;
4332 			mod_timer(&dev->stat_monitor,
4333 				  jiffies + STAT_UPDATE_TIMER);
4334 		}
4335 
4336 		/* Submit all free Rx URBs */
4337 
4338 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4339 			lan78xx_rx_urb_submit_all(dev);
4340 
4341 		/* Submit new Tx URBs */
4342 
4343 		lan78xx_tx_bh(dev);
4344 	}
4345 
4346 	return work_done;
4347 }
4348 
4349 static int lan78xx_poll(struct napi_struct *napi, int budget)
4350 {
4351 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4352 	int result = budget;
4353 	int work_done;
4354 
4355 	/* Don't do any work if the device is suspended */
4356 
4357 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4358 		napi_complete_done(napi, 0);
4359 		return 0;
4360 	}
4361 
4362 	/* Process completed URBs and submit new URBs */
4363 
4364 	work_done = lan78xx_bh(dev, budget);
4365 
4366 	if (work_done < budget) {
4367 		napi_complete_done(napi, work_done);
4368 
4369 		/* Start a new polling cycle if data was received or
4370 		 * data is waiting to be transmitted.
4371 		 */
4372 		if (!skb_queue_empty(&dev->rxq_done)) {
4373 			napi_schedule(napi);
4374 		} else if (netif_carrier_ok(dev->net)) {
4375 			if (skb_queue_empty(&dev->txq) &&
4376 			    !skb_queue_empty(&dev->txq_pend)) {
4377 				napi_schedule(napi);
4378 			} else {
4379 				netif_tx_lock(dev->net);
4380 				if (netif_queue_stopped(dev->net)) {
4381 					netif_wake_queue(dev->net);
4382 					napi_schedule(napi);
4383 				}
4384 				netif_tx_unlock(dev->net);
4385 			}
4386 		}
4387 		result = work_done;
4388 	}
4389 
4390 	return result;
4391 }
4392 
4393 static void lan78xx_delayedwork(struct work_struct *work)
4394 {
4395 	int status;
4396 	struct lan78xx_net *dev;
4397 
4398 	dev = container_of(work, struct lan78xx_net, wq.work);
4399 
4400 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4401 		return;
4402 
4403 	if (usb_autopm_get_interface(dev->intf) < 0)
4404 		return;
4405 
4406 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4407 		unlink_urbs(dev, &dev->txq);
4408 
4409 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4410 		if (status < 0 &&
4411 		    status != -EPIPE &&
4412 		    status != -ESHUTDOWN) {
4413 			if (netif_msg_tx_err(dev))
4414 				netdev_err(dev->net,
4415 					   "can't clear tx halt, status %d\n",
4416 					   status);
4417 		} else {
4418 			clear_bit(EVENT_TX_HALT, &dev->flags);
4419 			if (status != -ESHUTDOWN)
4420 				netif_wake_queue(dev->net);
4421 		}
4422 	}
4423 
4424 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4425 		unlink_urbs(dev, &dev->rxq);
4426 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4427 		if (status < 0 &&
4428 		    status != -EPIPE &&
4429 		    status != -ESHUTDOWN) {
4430 			if (netif_msg_rx_err(dev))
4431 				netdev_err(dev->net,
4432 					   "can't clear rx halt, status %d\n",
4433 					   status);
4434 		} else {
4435 			clear_bit(EVENT_RX_HALT, &dev->flags);
4436 			napi_schedule(&dev->napi);
4437 		}
4438 	}
4439 
4440 	if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) {
4441 		int ret = 0;
4442 
4443 		clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
4444 		ret = lan78xx_phy_int_ack(dev);
4445 		if (ret)
4446 			netdev_info(dev->net, "PHY INT ack failed (%pe)\n",
4447 				    ERR_PTR(ret));
4448 	}
4449 
4450 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4451 		lan78xx_update_stats(dev);
4452 
4453 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4454 
4455 		mod_timer(&dev->stat_monitor,
4456 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4457 
4458 		dev->delta = min((dev->delta * 2), 50);
4459 	}
4460 
4461 	usb_autopm_put_interface(dev->intf);
4462 }
4463 
4464 static void intr_complete(struct urb *urb)
4465 {
4466 	struct lan78xx_net *dev = urb->context;
4467 	int status = urb->status;
4468 
4469 	switch (status) {
4470 	/* success */
4471 	case 0:
4472 		lan78xx_status(dev, urb);
4473 		break;
4474 
4475 	/* software-driven interface shutdown */
4476 	case -ENOENT:			/* urb killed */
4477 	case -ENODEV:			/* hardware gone */
4478 	case -ESHUTDOWN:		/* hardware gone */
4479 		netif_dbg(dev, ifdown, dev->net,
4480 			  "intr shutdown, code %d\n", status);
4481 		return;
4482 
4483 	/* NOTE:  not throttling like RX/TX, since this endpoint
4484 	 * already polls infrequently
4485 	 */
4486 	default:
4487 		netdev_dbg(dev->net, "intr status %d\n", status);
4488 		break;
4489 	}
4490 
4491 	if (!netif_device_present(dev->net) ||
4492 	    !netif_running(dev->net)) {
4493 		netdev_warn(dev->net, "not submitting new status URB");
4494 		return;
4495 	}
4496 
4497 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4498 	status = usb_submit_urb(urb, GFP_ATOMIC);
4499 
4500 	switch (status) {
4501 	case  0:
4502 		break;
4503 	case -ENODEV:
4504 	case -ENOENT:
4505 		netif_dbg(dev, timer, dev->net,
4506 			  "intr resubmit %d (disconnect?)", status);
4507 		netif_device_detach(dev->net);
4508 		break;
4509 	default:
4510 		netif_err(dev, timer, dev->net,
4511 			  "intr resubmit --> %d\n", status);
4512 		break;
4513 	}
4514 }
4515 
4516 static void lan78xx_disconnect(struct usb_interface *intf)
4517 {
4518 	struct lan78xx_net *dev;
4519 	struct usb_device *udev;
4520 	struct net_device *net;
4521 
4522 	dev = usb_get_intfdata(intf);
4523 	usb_set_intfdata(intf, NULL);
4524 	if (!dev)
4525 		return;
4526 
4527 	udev = interface_to_usbdev(intf);
4528 	net = dev->net;
4529 
4530 	rtnl_lock();
4531 	phylink_stop(dev->phylink);
4532 	phylink_disconnect_phy(dev->phylink);
4533 	rtnl_unlock();
4534 
4535 	netif_napi_del(&dev->napi);
4536 
4537 	unregister_netdev(net);
4538 
4539 	timer_shutdown_sync(&dev->stat_monitor);
4540 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4541 	cancel_delayed_work_sync(&dev->wq);
4542 
4543 	phylink_destroy(dev->phylink);
4544 
4545 	usb_scuttle_anchored_urbs(&dev->deferred);
4546 
4547 	lan78xx_unbind(dev, intf);
4548 
4549 	lan78xx_free_tx_resources(dev);
4550 	lan78xx_free_rx_resources(dev);
4551 
4552 	usb_kill_urb(dev->urb_intr);
4553 	usb_free_urb(dev->urb_intr);
4554 
4555 	free_netdev(net);
4556 	usb_put_dev(udev);
4557 }
4558 
4559 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4560 {
4561 	struct lan78xx_net *dev = netdev_priv(net);
4562 
4563 	unlink_urbs(dev, &dev->txq);
4564 	napi_schedule(&dev->napi);
4565 }
4566 
4567 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4568 						struct net_device *netdev,
4569 						netdev_features_t features)
4570 {
4571 	struct lan78xx_net *dev = netdev_priv(netdev);
4572 
4573 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4574 		features &= ~NETIF_F_GSO_MASK;
4575 
4576 	features = vlan_features_check(skb, features);
4577 	features = vxlan_features_check(skb, features);
4578 
4579 	return features;
4580 }
4581 
4582 static const struct net_device_ops lan78xx_netdev_ops = {
4583 	.ndo_open		= lan78xx_open,
4584 	.ndo_stop		= lan78xx_stop,
4585 	.ndo_start_xmit		= lan78xx_start_xmit,
4586 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4587 	.ndo_change_mtu		= lan78xx_change_mtu,
4588 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4589 	.ndo_validate_addr	= eth_validate_addr,
4590 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4591 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4592 	.ndo_set_features	= lan78xx_set_features,
4593 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4594 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4595 	.ndo_features_check	= lan78xx_features_check,
4596 };
4597 
4598 static void lan78xx_stat_monitor(struct timer_list *t)
4599 {
4600 	struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);
4601 
4602 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4603 }
4604 
4605 static int lan78xx_probe(struct usb_interface *intf,
4606 			 const struct usb_device_id *id)
4607 {
4608 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4609 	struct lan78xx_net *dev;
4610 	struct net_device *netdev;
4611 	struct usb_device *udev;
4612 	int ret;
4613 	unsigned int maxp;
4614 	unsigned int period;
4615 	u8 *buf = NULL;
4616 
4617 	udev = interface_to_usbdev(intf);
4618 	udev = usb_get_dev(udev);
4619 
4620 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4621 	if (!netdev) {
4622 		dev_err(&intf->dev, "failed to allocate net device\n");
4623 		ret = -ENOMEM;
4624 		goto out1;
4625 	}
4626 
4627 	SET_NETDEV_DEV(netdev, &intf->dev);
4628 
4629 	dev = netdev_priv(netdev);
4630 	dev->udev = udev;
4631 	dev->intf = intf;
4632 	dev->net = netdev;
4633 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4634 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4635 
4636 	skb_queue_head_init(&dev->rxq);
4637 	skb_queue_head_init(&dev->txq);
4638 	skb_queue_head_init(&dev->rxq_done);
4639 	skb_queue_head_init(&dev->txq_pend);
4640 	skb_queue_head_init(&dev->rxq_overflow);
4641 	mutex_init(&dev->mdiobus_mutex);
4642 	mutex_init(&dev->dev_mutex);
4643 
4644 	ret = lan78xx_urb_config_init(dev);
4645 	if (ret < 0)
4646 		goto out2;
4647 
4648 	ret = lan78xx_alloc_tx_resources(dev);
4649 	if (ret < 0)
4650 		goto out2;
4651 
4652 	ret = lan78xx_alloc_rx_resources(dev);
4653 	if (ret < 0)
4654 		goto out3;
4655 
4656 	/* MTU range: 68 - 9000 */
4657 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4658 
4659 	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4660 
4661 	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4662 
4663 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4664 	init_usb_anchor(&dev->deferred);
4665 
4666 	netdev->netdev_ops = &lan78xx_netdev_ops;
4667 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4668 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4669 
4670 	dev->delta = 1;
4671 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4672 
4673 	mutex_init(&dev->stats.access_lock);
4674 
4675 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4676 		ret = -ENODEV;
4677 		goto out4;
4678 	}
4679 
4680 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4681 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4682 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4683 		ret = -ENODEV;
4684 		goto out4;
4685 	}
4686 
4687 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4688 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4689 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4690 		ret = -ENODEV;
4691 		goto out4;
4692 	}
4693 
4694 	ep_intr = &intf->cur_altsetting->endpoint[2];
4695 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4696 		ret = -ENODEV;
4697 		goto out4;
4698 	}
4699 
4700 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4701 					usb_endpoint_num(&ep_intr->desc));
4702 
4703 	ret = lan78xx_bind(dev, intf);
4704 	if (ret < 0)
4705 		goto out4;
4706 
4707 	period = ep_intr->desc.bInterval;
4708 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4709 
4710 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4711 	if (!dev->urb_intr) {
4712 		ret = -ENOMEM;
4713 		goto out5;
4714 	}
4715 
4716 	buf = kmalloc(maxp, GFP_KERNEL);
4717 	if (!buf) {
4718 		ret = -ENOMEM;
4719 		goto free_urbs;
4720 	}
4721 
4722 	usb_fill_int_urb(dev->urb_intr, dev->udev,
4723 			 dev->pipe_intr, buf, maxp,
4724 			 intr_complete, dev, period);
4725 	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4726 
4727 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4728 
4729 	/* Reject broken descriptors. */
4730 	if (dev->maxpacket == 0) {
4731 		ret = -ENODEV;
4732 		goto free_urbs;
4733 	}
4734 
4735 	/* driver requires remote-wakeup capability during autosuspend. */
4736 	intf->needs_remote_wakeup = 1;
4737 
4738 	ret = lan78xx_phy_init(dev);
4739 	if (ret < 0)
4740 		goto free_urbs;
4741 
4742 	ret = register_netdev(netdev);
4743 	if (ret != 0) {
4744 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4745 		goto phy_uninit;
4746 	}
4747 
4748 	usb_set_intfdata(intf, dev);
4749 
4750 	ret = device_set_wakeup_enable(&udev->dev, true);
4751 
4752 	/* The default autosuspend delay of 2 seconds has more overhead
4753 	 * than advantage; use 10 seconds instead.
4754 	 */
4755 	pm_runtime_set_autosuspend_delay(&udev->dev,
4756 					 DEFAULT_AUTOSUSPEND_DELAY);
4757 
4758 	return 0;
4759 
4760 phy_uninit:
4761 	lan78xx_phy_uninit(dev);
4762 free_urbs:
4763 	usb_free_urb(dev->urb_intr);
4764 out5:
4765 	lan78xx_unbind(dev, intf);
4766 out4:
4767 	netif_napi_del(&dev->napi);
4768 	lan78xx_free_rx_resources(dev);
4769 out3:
4770 	lan78xx_free_tx_resources(dev);
4771 out2:
4772 	free_netdev(netdev);
4773 out1:
4774 	usb_put_dev(udev);
4775 
4776 	return ret;
4777 }
4778 
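/* Compute the CRC-16 that the wake-up-frame filter hardware is assumed to
 * use: polynomial 0x8005, seed 0xFFFF, data bits consumed LSB first. Only
 * the frame bytes selected by the WUF_MASKn registers are fed through this
 * CRC and compared against the value programmed into WUF_CFGx.
 */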
4779 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4780 {
4781 	const u16 crc16poly = 0x8005;
4782 	int i;
4783 	u16 bit, crc, msb;
4784 	u8 data;
4785 
4786 	crc = 0xFFFF;
4787 	for (i = 0; i < len; i++) {
4788 		data = *buf++;
4789 		for (bit = 0; bit < 8; bit++) {
4790 			msb = crc >> 15;
4791 			crc <<= 1;
4792 
4793 			if (msb ^ (u16)(data & 1)) {
4794 				crc ^= crc16poly;
4795 				crc |= (u16)0x0001U;
4796 			}
4797 			data >>= 1;
4798 		}
4799 	}
4800 
4801 	return crc;
4802 }
4803 
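/* Prepare the device for USB autosuspend (selective suspend): stop both
 * data paths, arm "good frame" (RFE) and PHY wake sources, select suspend
 * mode 3, clear any stale wake-up status and restart the Rx path so that
 * a matching frame can trigger remote wake-up.
 */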
4804 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4805 {
4806 	u32 buf;
4807 	int ret;
4808 
4809 	ret = lan78xx_stop_tx_path(dev);
4810 	if (ret < 0)
4811 		return ret;
4812 
4813 	ret = lan78xx_stop_rx_path(dev);
4814 	if (ret < 0)
4815 		return ret;
4816 
4817 	/* auto suspend (selective suspend) */
4818 
4819 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4820 	if (ret < 0)
4821 		return ret;
4822 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4823 	if (ret < 0)
4824 		return ret;
4825 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4826 	if (ret < 0)
4827 		return ret;
4828 
4829 	/* set goodframe wakeup */
4830 
4831 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4832 	if (ret < 0)
4833 		return ret;
4834 
4835 	buf |= WUCSR_RFE_WAKE_EN_;
4836 	buf |= WUCSR_STORE_WAKE_;
4837 
4838 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4839 	if (ret < 0)
4840 		return ret;
4841 
4842 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4843 	if (ret < 0)
4844 		return ret;
4845 
4846 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4847 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4848 	buf |= PMT_CTL_PHY_WAKE_EN_;
4849 	buf |= PMT_CTL_WOL_EN_;
4850 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4851 	buf |= PMT_CTL_SUS_MODE_3_;
4852 
4853 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4854 	if (ret < 0)
4855 		return ret;
4856 
4857 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4858 	if (ret < 0)
4859 		return ret;
4860 
4861 	buf |= PMT_CTL_WUPS_MASK_;
4862 
4863 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4864 	if (ret < 0)
4865 		return ret;
4866 
4867 	ret = lan78xx_start_rx_path(dev);
4868 
4869 	return ret;
4870 }
4871 
4872 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4873 {
4874 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4875 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4876 	const u8 arp_type[2] = { 0x08, 0x06 };
4877 	u32 temp_pmt_ctl;
4878 	int mask_index;
4879 	u32 temp_wucsr;
4880 	u32 buf;
4881 	u16 crc;
4882 	int ret;
4883 
4884 	ret = lan78xx_stop_tx_path(dev);
4885 	if (ret < 0)
4886 		return ret;
4887 	ret = lan78xx_stop_rx_path(dev);
4888 	if (ret < 0)
4889 		return ret;
4890 
4891 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4892 	if (ret < 0)
4893 		return ret;
4894 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4895 	if (ret < 0)
4896 		return ret;
4897 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4898 	if (ret < 0)
4899 		return ret;
4900 
4901 	temp_wucsr = 0;
4902 
4903 	temp_pmt_ctl = 0;
4904 
4905 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4906 	if (ret < 0)
4907 		return ret;
4908 
4909 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4910 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4911 
4912 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4913 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4914 		if (ret < 0)
4915 			return ret;
4916 	}
4917 
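	/* Each wake-up filter pairs a byte mask (WUF_MASK0..3, bit i of the
	 * mask selecting byte i of the frame) with the CRC-16 of the selected
	 * bytes in WUF_CFGx: mask 0x7 below matches bytes 0-2 (the multicast
	 * OUI) and mask 0x3000 matches bytes 12-13 (the EtherType field).
	 */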
4918 	mask_index = 0;
4919 	if (wol & WAKE_PHY) {
4920 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4921 
4922 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4923 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4924 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4925 	}
4926 	if (wol & WAKE_MAGIC) {
4927 		temp_wucsr |= WUCSR_MPEN_;
4928 
4929 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4930 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4931 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4932 	}
4933 	if (wol & WAKE_BCAST) {
4934 		temp_wucsr |= WUCSR_BCST_EN_;
4935 
4936 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4937 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4938 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4939 	}
4940 	if (wol & WAKE_MCAST) {
4941 		temp_wucsr |= WUCSR_WAKE_EN_;
4942 
4943 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4944 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4945 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4946 					WUF_CFGX_EN_ |
4947 					WUF_CFGX_TYPE_MCAST_ |
4948 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4949 					(crc & WUF_CFGX_CRC16_MASK_));
4950 		if (ret < 0)
4951 			return ret;
4952 
4953 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4954 		if (ret < 0)
4955 			return ret;
4956 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4957 		if (ret < 0)
4958 			return ret;
4959 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4960 		if (ret < 0)
4961 			return ret;
4962 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4963 		if (ret < 0)
4964 			return ret;
4965 
4966 		mask_index++;
4967 
4968 		/* for IPv6 Multicast */
4969 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4970 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4971 					WUF_CFGX_EN_ |
4972 					WUF_CFGX_TYPE_MCAST_ |
4973 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4974 					(crc & WUF_CFGX_CRC16_MASK_));
4975 		if (ret < 0)
4976 			return ret;
4977 
4978 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4979 		if (ret < 0)
4980 			return ret;
4981 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4982 		if (ret < 0)
4983 			return ret;
4984 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4985 		if (ret < 0)
4986 			return ret;
4987 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4988 		if (ret < 0)
4989 			return ret;
4990 
4991 		mask_index++;
4992 
4993 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4994 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4995 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4996 	}
4997 	if (wol & WAKE_UCAST) {
4998 		temp_wucsr |= WUCSR_PFDA_EN_;
4999 
5000 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5001 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5002 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5003 	}
5004 	if (wol & WAKE_ARP) {
5005 		temp_wucsr |= WUCSR_WAKE_EN_;
5006 
5007 		/* set WUF_CFG & WUF_MASK
5008 		 * for packet type (offset 12, 13) = ARP (0x0806)
5009 		 */
5010 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
5011 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
5012 					WUF_CFGX_EN_ |
5013 					WUF_CFGX_TYPE_ALL_ |
5014 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
5015 					(crc & WUF_CFGX_CRC16_MASK_));
5016 		if (ret < 0)
5017 			return ret;
5018 
5019 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
5020 		if (ret < 0)
5021 			return ret;
5022 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
5023 		if (ret < 0)
5024 			return ret;
5025 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
5026 		if (ret < 0)
5027 			return ret;
5028 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
5029 		if (ret < 0)
5030 			return ret;
5031 
5032 		mask_index++;
5033 
5034 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5035 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5036 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5037 	}
5038 
5039 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
5040 	if (ret < 0)
5041 		return ret;
5042 
5043 	/* when multiple WOL bits are set */
5044 	if (hweight_long((unsigned long)wol) > 1) {
5045 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5046 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5047 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5048 	}
5049 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
5050 	if (ret < 0)
5051 		return ret;
5052 
5053 	/* clear WUPS */
5054 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5055 	if (ret < 0)
5056 		return ret;
5057 
5058 	buf |= PMT_CTL_WUPS_MASK_;
5059 
5060 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5061 	if (ret < 0)
5062 		return ret;
5063 
5064 	ret = lan78xx_start_rx_path(dev);
5065 
5066 	return ret;
5067 }
5068 
5069 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
5070 {
5071 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5072 	bool dev_open;
5073 	int ret;
5074 
5075 	mutex_lock(&dev->dev_mutex);
5076 
5077 	netif_dbg(dev, ifdown, dev->net,
5078 		  "suspending: pm event %#x", message.event);
5079 
5080 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5081 
5082 	if (dev_open) {
5083 		spin_lock_irq(&dev->txq.lock);
5084 		/* don't autosuspend while transmitting */
5085 		if ((skb_queue_len(&dev->txq) ||
5086 		     skb_queue_len(&dev->txq_pend)) &&
5087 		    PMSG_IS_AUTO(message)) {
5088 			spin_unlock_irq(&dev->txq.lock);
5089 			ret = -EBUSY;
5090 			goto out;
5091 		} else {
5092 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
5093 			spin_unlock_irq(&dev->txq.lock);
5094 		}
5095 
5096 		rtnl_lock();
5097 		phylink_suspend(dev->phylink, false);
5098 		rtnl_unlock();
5099 
5100 		/* stop RX */
5101 		ret = lan78xx_stop_rx_path(dev);
5102 		if (ret < 0)
5103 			goto out;
5104 
5105 		ret = lan78xx_flush_rx_fifo(dev);
5106 		if (ret < 0)
5107 			goto out;
5108 
5109 		/* stop Tx */
5110 		ret = lan78xx_stop_tx_path(dev);
5111 		if (ret < 0)
5112 			goto out;
5113 
5114 		/* empty out the Rx and Tx queues */
5115 		netif_device_detach(dev->net);
5116 		lan78xx_terminate_urbs(dev);
5117 		usb_kill_urb(dev->urb_intr);
5118 
5119 		/* reattach */
5120 		netif_device_attach(dev->net);
5121 
5122 		timer_delete(&dev->stat_monitor);
5123 
5124 		if (PMSG_IS_AUTO(message)) {
5125 			ret = lan78xx_set_auto_suspend(dev);
5126 			if (ret < 0)
5127 				goto out;
5128 		} else {
5129 			struct lan78xx_priv *pdata;
5130 
5131 			pdata = (struct lan78xx_priv *)(dev->data[0]);
5132 			netif_carrier_off(dev->net);
5133 			ret = lan78xx_set_suspend(dev, pdata->wol);
5134 			if (ret < 0)
5135 				goto out;
5136 		}
5137 	} else {
5138 		/* Interface is down; don't allow WOL and PHY
5139 		 * events to wake up the host
5140 		 */
5141 		u32 buf;
5142 
5143 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
5144 
5145 		ret = lan78xx_write_reg(dev, WUCSR, 0);
5146 		if (ret < 0)
5147 			goto out;
5148 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
5149 		if (ret < 0)
5150 			goto out;
5151 
5152 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5153 		if (ret < 0)
5154 			goto out;
5155 
5156 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
5157 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
5158 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
5159 		buf |= PMT_CTL_SUS_MODE_3_;
5160 
5161 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5162 		if (ret < 0)
5163 			goto out;
5164 
5165 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5166 		if (ret < 0)
5167 			goto out;
5168 
5169 		buf |= PMT_CTL_WUPS_MASK_;
5170 
5171 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5172 		if (ret < 0)
5173 			goto out;
5174 	}
5175 
5176 	ret = 0;
5177 out:
5178 	mutex_unlock(&dev->dev_mutex);
5179 
5180 	return ret;
5181 }
5182 
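/* Resubmit Tx URBs that were anchored to dev->deferred while the device
 * was autosuspended. Returns true if the bulk-out pipe stalled (-EPIPE),
 * in which case the caller is expected to defer an EVENT_TX_HALT to clear
 * the halt condition.
 */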
5183 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
5184 {
5185 	bool pipe_halted = false;
5186 	struct urb *urb;
5187 
5188 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5189 		struct sk_buff *skb = urb->context;
5190 		int ret;
5191 
5192 		if (!netif_device_present(dev->net) ||
5193 		    !netif_carrier_ok(dev->net) ||
5194 		    pipe_halted) {
5195 			lan78xx_release_tx_buf(dev, skb);
5196 			continue;
5197 		}
5198 
5199 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5200 
5201 		if (ret == 0) {
5202 			netif_trans_update(dev->net);
5203 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5204 		} else {
5205 			if (ret == -EPIPE) {
5206 				netif_stop_queue(dev->net);
5207 				pipe_halted = true;
5208 			} else if (ret == -ENODEV) {
5209 				netif_device_detach(dev->net);
5210 			}
5211 
5212 			lan78xx_release_tx_buf(dev, skb);
5213 		}
5214 	}
5215 
5216 	return pipe_halted;
5217 }
5218 
5219 static int lan78xx_resume(struct usb_interface *intf)
5220 {
5221 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5222 	bool dev_open;
5223 	int ret;
5224 
5225 	mutex_lock(&dev->dev_mutex);
5226 
5227 	netif_dbg(dev, ifup, dev->net, "resuming device");
5228 
5229 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5230 
5231 	if (dev_open) {
5232 		bool pipe_halted = false;
5233 
5234 		ret = lan78xx_flush_tx_fifo(dev);
5235 		if (ret < 0)
5236 			goto out;
5237 
5238 		if (dev->urb_intr) {
5239 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
5240 
5241 			if (ret < 0) {
5242 				if (ret == -ENODEV)
5243 					netif_device_detach(dev->net);
5244 				netdev_warn(dev->net, "Failed to submit intr URB");
5245 			}
5246 		}
5247 
5248 		spin_lock_irq(&dev->txq.lock);
5249 
5250 		if (netif_device_present(dev->net)) {
5251 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
5252 
5253 			if (pipe_halted)
5254 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5255 		}
5256 
5257 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5258 
5259 		spin_unlock_irq(&dev->txq.lock);
5260 
5261 		if (!pipe_halted &&
5262 		    netif_device_present(dev->net) &&
5263 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5264 			netif_start_queue(dev->net);
5265 
5266 		ret = lan78xx_start_tx_path(dev);
5267 		if (ret < 0)
5268 			goto out;
5269 
5270 		napi_schedule(&dev->napi);
5271 
5272 		if (!timer_pending(&dev->stat_monitor)) {
5273 			dev->delta = 1;
5274 			mod_timer(&dev->stat_monitor,
5275 				  jiffies + STAT_UPDATE_TIMER);
5276 		}
5277 
5278 	} else {
5279 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5280 	}
5281 
5282 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
5283 	if (ret < 0)
5284 		goto out;
5285 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5286 	if (ret < 0)
5287 		goto out;
5288 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5289 	if (ret < 0)
5290 		goto out;
5291 
5292 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5293 					     WUCSR2_ARP_RCD_ |
5294 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5295 					     WUCSR2_IPV4_TCPSYN_RCD_);
5296 	if (ret < 0)
5297 		goto out;
5298 
5299 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5300 					    WUCSR_EEE_RX_WAKE_ |
5301 					    WUCSR_PFDA_FR_ |
5302 					    WUCSR_RFE_WAKE_FR_ |
5303 					    WUCSR_WUFR_ |
5304 					    WUCSR_MPR_ |
5305 					    WUCSR_BCST_FR_);
5306 	if (ret < 0)
5307 		goto out;
5308 
5309 	ret = 0;
5310 out:
5311 	mutex_unlock(&dev->dev_mutex);
5312 
5313 	return ret;
5314 }
5315 
5316 static int lan78xx_reset_resume(struct usb_interface *intf)
5317 {
5318 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5319 	int ret;
5320 
5321 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5322 
5323 	ret = lan78xx_reset(dev);
5324 	if (ret < 0)
5325 		return ret;
5326 
5327 	ret = lan78xx_resume(intf);
5328 	if (ret < 0)
5329 		return ret;
5330 
5331 	rtnl_lock();
5332 	phylink_resume(dev->phylink);
5333 	rtnl_unlock();
5334 
5335 	return 0;
5336 }
5337 
5338 static const struct usb_device_id products[] = {
5339 	{
5340 	/* LAN7800 USB Gigabit Ethernet Device */
5341 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5342 	},
5343 	{
5344 	/* LAN7850 USB Gigabit Ethernet Device */
5345 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5346 	},
5347 	{
5348 	/* LAN7801 USB Gigabit Ethernet Device */
5349 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5350 	},
5351 	{
5352 	/* AT29M2-AF USB Gigabit Ethernet Device */
5353 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5354 	},
5355 	{},
5356 };
5357 MODULE_DEVICE_TABLE(usb, products);
5358 
5359 static struct usb_driver lan78xx_driver = {
5360 	.name			= DRIVER_NAME,
5361 	.id_table		= products,
5362 	.probe			= lan78xx_probe,
5363 	.disconnect		= lan78xx_disconnect,
5364 	.suspend		= lan78xx_suspend,
5365 	.resume			= lan78xx_resume,
5366 	.reset_resume		= lan78xx_reset_resume,
5367 	.supports_autosuspend	= 1,
5368 	.disable_hub_initiated_lpm = 1,
5369 };
5370 
5371 module_usb_driver(lan78xx_driver);
5372 
5373 MODULE_AUTHOR(DRIVER_AUTHOR);
5374 MODULE_DESCRIPTION(DRIVER_DESC);
5375 MODULE_LICENSE("GPL");
5376