// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phylink.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/selftests.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

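/* Convert a byte count to the 512-byte units used by the 7-bit flow
 * control threshold fields, rounding up, and pack the "on" and "off"
 * thresholds into a single FLOW register value.
 */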
#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_ALIGNMENT			(4)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID	(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

#define TX_URB_NUM			10
#define TX_SS_URB_NUM			TX_URB_NUM
#define TX_HS_URB_NUM			TX_URB_NUM
#define TX_FS_URB_NUM			TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet. */
#define TX_SS_URB_SIZE			(32 * 1024)
#define TX_HS_URB_SIZE			(16 * 1024)
#define TX_FS_URB_SIZE			(10 * 1024)

#define RX_SS_URB_NUM			30
#define RX_HS_URB_NUM			10
#define RX_FS_URB_NUM			10
#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
#define RX_FS_URB_SIZE			TX_FS_URB_SIZE

#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY		0x2000
#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY		0x2000
#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY		0x2000

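/* Every transmitted frame is prefixed by an 8-byte Tx command header, so
 * the smallest valid skb and the largest TSO payload that fits in a
 * single URB are both derived from TX_CMD_LEN.
 */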
#define TX_CMD_LEN			8
#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)

#define RX_CMD_LEN			10
#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (msec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

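/* lan78xx_get_stats() copies struct lan78xx_statstage64 into the ethtool
 * buffer verbatim, so the strings above must stay in the same order as
 * the structure members below.
 */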
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_PHY_INT_ACK		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		mdiobus_mutex; /* for MDIO bus access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;

	wait_queue_head_t	*wait;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;

	struct phylink		*phylink;
	struct phylink_config	phylink_config;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	if (skb_queue_empty(buf_pool))
		return NULL;

	return skb_dequeue(buf_pool);
}

static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}

static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
{
	struct skb_data *entry;
	struct sk_buff *buf;

	while (!skb_queue_empty(buf_pool)) {
		buf = skb_dequeue(buf_pool);
		if (buf) {
			entry = (struct skb_data *)buf->cb;
			usb_free_urb(entry->urb);
			dev_kfree_skb_any(buf);
		}
	}
}

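/* Preallocate a pool of skbs, each with a URB attached through its cb
 * area, for reuse on the Rx/Tx paths. On failure any partially built
 * pool is freed and -ENOMEM is returned.
 */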
static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}

static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}

static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}

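/* Register access is done with vendor-specific control transfers. The
 * transfer buffer is kmalloc'd rather than placed on the stack because
 * usb_control_msg() requires DMA-able memory.
 */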
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

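/* Read-modify-write helper: clears the bits in @mask and sets those of
 * them that are also set in @data.
 */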
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	return lan78xx_write_reg(dev, reg, buf);
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

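	/* Rebuild 64-bit totals: each 32-bit hardware counter contributes
	 * its current value plus one full wrap (rollover_max + 1) for every
	 * rollover observed so far.
	 */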
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}

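/* Clear @hw_enabled in @reg and poll until the block reports @hw_disabled,
 * giving up after HW_DISABLE_TIMEOUT. Returns 0 once stopped, -ETIMEDOUT
 * otherwise.
 */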
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	return stopped ? 0 : -ETIMEDOUT;
}

static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}

static int lan78xx_start_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start tx path");

	/* Start the MAC transmitter */

	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
	if (ret < 0)
		return ret;

	/* Start the Tx FIFO */

	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop tx path");

	/* Stop the Tx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
	if (ret < 0)
		return ret;

	/* Stop the MAC transmitter */

	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}

static int lan78xx_start_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start rx path");

	/* Start the Rx FIFO */

	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
	if (ret < 0)
		return ret;

	/* Start the MAC receiver */

	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop rx path");

	/* Stop the MAC receiver */

	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
	if (ret < 0)
		return ret;

	/* Stop the Rx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}

/* Poll MII_ACC until the MII is no longer busy, with a one-second timeout.
 * Must be called with mdiobus_mutex held.
 */
static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (ret < 0)
			return ret;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -ETIMEDOUT;
}

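/* Compose an MII_ACC register value from the PHY address, register index
 * and transfer direction, with the busy bit set to start the transaction.
 */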
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM operation timed out");
		return -ETIMEDOUT;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -ETIMEDOUT;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* A timeout is not a USB-specific error; try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		int rc = lan78xx_write_reg(dev, HW_CFG, saved);
		/* If USB fails, there is nothing to do */
		if (rc < 0)
			return rc;
	}
	return ret;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	int ret;
	u8 sig;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if (ret < 0)
		return ret;

	if (sig != EEPROM_INDICATOR)
		return -ENODATA;

	return lan78xx_read_raw_eeprom(dev, offset, length, data);
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* A timeout is not a USB-specific error; try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* A timeout is not a USB-specific error; try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* A timeout is not a USB-specific error; try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		int rc = lan78xx_write_reg(dev, HW_CFG, saved);
		/* If USB fails, there is nothing to do */
		if (rc < 0)
			return rc;
	}
	return ret;
}

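/* Read @length bytes from OTP: power the OTP block up if it is powered
 * down, then for each byte program the address into OTP_ADDR1/OTP_ADDR2,
 * issue a READ command and poll OTP_STATUS until the controller is idle.
 */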
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait for it to clear */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait for it to clear */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return ret;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -ETIMEDOUT;
}

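/* Write @length 32-bit words to the internal RAM selected by @ram_select
 * through the indirect DP_ADDR/DP_DATA/DP_CMD interface, serialised by
 * dataport_mutex.
 */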
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

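/* Stage a perfect-filter (MAF) entry: the low register takes the first
 * four bytes of the MAC address, the high register the remaining two plus
 * the valid and destination-type flags. The staged tables are written to
 * hardware by the deferred multicast work.
 */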
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns the hash bit number (upper nine bits of the CRC32) for a given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i, ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
				     DP_SEL_VHF_VLAN_LEN,
				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
	if (ret < 0)
		goto multicast_write_done;

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
		if (ret < 0)
			goto multicast_write_done;
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

multicast_write_done:
	if (ret < 0)
		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
	return;
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds our own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);

static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}

/**
 * lan78xx_phy_int_ack - Acknowledge PHY interrupt
 * @dev: pointer to the LAN78xx device structure
 *
 * This function acknowledges the PHY interrupt by setting the
 * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
{
	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

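/* Interrupt endpoint completion: the device reports its status as a
 * single 4-byte little-endian word. A PHY interrupt is acknowledged from
 * deferred work and forwarded to the PHY's mapped IRQ if one exists.
 */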
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq_safe(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
	else if (stringset == ETH_SS_TEST)
		net_selftest_get_strings(data);
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else if (sset == ETH_SS_TEST)
		return net_selftest_get_count();
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_get_eee(dev->phylink, edata);
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_eee(dev->phylink, edata);
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_get(dev->phylink, cmd);
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_set(dev->phylink, cmd);
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	phylink_ethtool_get_pauseparam(dev->phylink, pause);
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_pauseparam(dev->phylink, pause);
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}

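/* Snapshot the registers listed in lan78xx_regs[]. If any read fails,
 * the values read so far are zeroed rather than returning a partial
 * snapshot to ethtool.
 */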
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	unsigned int data_count = 0;
	u32 *data = buf;
	int i, ret;

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
		if (ret < 0) {
			netdev_warn(dev->net,
				    "failed to read register 0x%08x\n",
				    lan78xx_regs[i]);
			goto clean_data;
		}

		data_count++;
	}

	return;

clean_data:
	memset(data, 0, data_count * sizeof(u32));
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.self_test	= net_selftest,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

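/* Select the MAC address in order of preference: a valid address already
 * in RX_ADDRL/RX_ADDRH, then Device Tree/platform data, then EEPROM or
 * OTP, and finally a random address. The result is also programmed into
 * perfect-filter slot 0.
 */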
static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];
	int ret;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
	if (ret < 0)
		return ret;

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(dev->net, addr);

	return 0;
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);
	if (ret < 0)
		goto done;

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
2094 		dev->mdiobus->phy_mask = ~(1 << 1);
2095 		break;
2096 	case ID_REV_CHIP_ID_7801_:
2097 		/* scan through PHYAD[2..0] */
2098 		dev->mdiobus->phy_mask = ~(0xFF);
2099 		break;
2100 	}
2101 
2102 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2103 	ret = of_mdiobus_register(dev->mdiobus, node);
2104 	of_node_put(node);
2105 	if (ret) {
2106 		netdev_err(dev->net, "can't register MDIO bus\n");
2107 		goto exit1;
2108 	}
2109 
2110 	netdev_dbg(dev->net, "registered MDIO bus %s\n", dev->mdiobus->id);
2111 	return 0;
2112 exit1:
2113 	mdiobus_free(dev->mdiobus);
2114 	return ret;
2115 }
2116 
2117 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2118 {
2119 	mdiobus_unregister(dev->mdiobus);
2120 	mdiobus_free(dev->mdiobus);
2121 }
2122 
2123 static int irq_map(struct irq_domain *d, unsigned int irq,
2124 		   irq_hw_number_t hwirq)
2125 {
2126 	struct irq_domain_data *data = d->host_data;
2127 
2128 	irq_set_chip_data(irq, data);
2129 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2130 	irq_set_noprobe(irq);
2131 
2132 	return 0;
2133 }
2134 
2135 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2136 {
2137 	irq_set_chip_and_handler(irq, NULL, NULL);
2138 	irq_set_chip_data(irq, NULL);
2139 }
2140 
2141 static const struct irq_domain_ops chip_domain_ops = {
2142 	.map	= irq_map,
2143 	.unmap	= irq_unmap,
2144 };
2145 
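/* The mask/unmask callbacks below run in atomic context, so they only
 * update the cached irqenable word; the INT_EP_CTL register itself is
 * written from irq_bus_sync_unlock(), where USB register access may
 * sleep.
 */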
2146 static void lan78xx_irq_mask(struct irq_data *irqd)
2147 {
2148 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2149 
2150 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2151 }
2152 
2153 static void lan78xx_irq_unmask(struct irq_data *irqd)
2154 {
2155 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2156 
2157 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2158 }
2159 
2160 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2161 {
2162 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2163 
2164 	mutex_lock(&data->irq_lock);
2165 }
2166 
2167 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2168 {
2169 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2170 	struct lan78xx_net *dev =
2171 			container_of(data, struct lan78xx_net, domain_data);
2172 	u32 buf;
2173 	int ret;
2174 
2175 	/* Register access is done here: irq_bus_lock and irq_bus_sync_unlock
2176 	 * are the only two callbacks executed in a non-atomic context.
2177 	 */
2178 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2179 	if (ret < 0)
2180 		goto irq_bus_sync_unlock;
2181 
2182 	if (buf != data->irqenable)
2183 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2184 
2185 irq_bus_sync_unlock:
2186 	if (ret < 0)
2187 		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
2188 			   ERR_PTR(ret));
2189 
2190 	mutex_unlock(&data->irq_lock);
2191 }
2192 
2193 static struct irq_chip lan78xx_irqchip = {
2194 	.name			= "lan78xx-irqs",
2195 	.irq_mask		= lan78xx_irq_mask,
2196 	.irq_unmask		= lan78xx_irq_unmask,
2197 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2198 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2199 };
2200 
2201 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2202 {
2203 	struct irq_domain *irqdomain;
2204 	unsigned int irqmap = 0;
2205 	u32 buf;
2206 	int ret = 0;
2207 
2208 	mutex_init(&dev->domain_data.irq_lock);
2209 
2210 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2211 	if (ret < 0)
2212 		return ret;
2213 
2214 	dev->domain_data.irqenable = buf;
2215 
2216 	dev->domain_data.irqchip = &lan78xx_irqchip;
2217 	dev->domain_data.irq_handler = handle_simple_irq;
2218 
2219 	irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
2220 					     &chip_domain_ops, &dev->domain_data);
2221 	if (irqdomain) {
2222 		/* create mapping for PHY interrupt */
2223 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2224 		if (!irqmap) {
2225 			irq_domain_remove(irqdomain);
2226 
2227 			irqdomain = NULL;
2228 			ret = -EINVAL;
2229 		}
2230 	} else {
2231 		ret = -EINVAL;
2232 	}
2233 
2234 	dev->domain_data.irqdomain = irqdomain;
2235 	dev->domain_data.phyirq = irqmap;
2236 
2237 	return ret;
2238 }
2239 
2240 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2241 {
2242 	if (dev->domain_data.phyirq > 0) {
2243 		irq_dispose_mapping(dev->domain_data.phyirq);
2244 
2245 		if (dev->domain_data.irqdomain)
2246 			irq_domain_remove(dev->domain_data.irqdomain);
2247 	}
2248 	dev->domain_data.phyirq = 0;
2249 	dev->domain_data.irqdomain = NULL;
2250 }
2251 
2252 static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode,
2253 			       const struct phylink_link_state *state)
2254 {
2255 	struct net_device *net = to_net_dev(config->dev);
2256 	struct lan78xx_net *dev = netdev_priv(net);
2257 	u32 mac_cr = 0;
2258 	int ret;
2259 
2260 	/* Check if the mode is supported */
2261 	if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) {
2262 		netdev_err(net, "Unsupported negotiation mode: %u\n", mode);
2263 		return;
2264 	}
2265 
2266 	switch (state->interface) {
2267 	case PHY_INTERFACE_MODE_GMII:
2268 		mac_cr |= MAC_CR_GMII_EN_;
2269 		break;
2270 	case PHY_INTERFACE_MODE_RGMII:
2271 	case PHY_INTERFACE_MODE_RGMII_ID:
2272 	case PHY_INTERFACE_MODE_RGMII_TXID:
2273 	case PHY_INTERFACE_MODE_RGMII_RXID:
2274 		break;
2275 	default:
2276 		netdev_warn(net, "Unsupported interface mode: %d\n",
2277 			    state->interface);
2278 		return;
2279 	}
2280 
2281 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr);
2282 	if (ret < 0)
2283 		netdev_err(net, "Failed to config MAC with error %pe\n",
2284 			   ERR_PTR(ret));
2285 }
2286 
2287 static void lan78xx_mac_link_down(struct phylink_config *config,
2288 				  unsigned int mode, phy_interface_t interface)
2289 {
2290 	struct net_device *net = to_net_dev(config->dev);
2291 	struct lan78xx_net *dev = netdev_priv(net);
2292 	int ret;
2293 
2294 	netif_stop_queue(net);
2295 
2296 	/* MAC reset will not de-assert TXEN/RXEN; we need to stop them
2297 	 * manually before resetting. TX and RX must be disabled before the
2298 	 * link_up sequence runs.
2299 	 */
2300 	ret = lan78xx_stop_tx_path(dev);
2301 	if (ret < 0)
2302 		goto link_down_fail;
2303 
2304 	ret = lan78xx_stop_rx_path(dev);
2305 	if (ret < 0)
2306 		goto link_down_fail;
2307 
2308 	/* MAC reset does not appear to affect the MAC configuration. It is
2309 	 * unclear whether it is really needed, but previous driver versions
2310 	 * did it, so keep it here.
2311 	 */
2312 	ret = lan78xx_mac_reset(dev);
2313 	if (ret < 0)
2314 		goto link_down_fail;
2315 
2316 	return;
2317 
2318 link_down_fail:
2319 	netdev_err(dev->net, "Failed to set MAC down with error %pe\n",
2320 		   ERR_PTR(ret));
2321 }
2322 
2323 /**
2324  * lan78xx_configure_usb - Configure USB link power settings
2325  * @dev: pointer to the LAN78xx device structure
2326  * @speed: negotiated Ethernet link speed (in Mbps)
2327  *
2328  * This function configures U1/U2 link power management for SuperSpeed
2329  * USB devices based on the current Ethernet link speed. It uses the
2330  * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2331  *
2332  * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2333  *       LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2334  *
2335  * Return: 0 on success or a negative error code on failure.
2336  */
2337 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2338 {
2339 	u32 mask, val;
2340 	int ret;
2341 
2342 	/* Only configure USB settings for SuperSpeed devices */
2343 	if (dev->udev->speed != USB_SPEED_SUPER)
2344 		return 0;
2345 
2346 	/* LAN7850 does not support USB 3.x */
2347 	if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2348 		netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2349 		return 0;
2350 	}
2351 
2352 	switch (speed) {
2353 	case SPEED_1000:
2354 		/* Disable U2, enable U1 */
2355 		ret = lan78xx_update_reg(dev, USB_CFG1,
2356 					 USB_CFG1_DEV_U2_INIT_EN_, 0);
2357 		if (ret < 0)
2358 			return ret;
2359 
2360 		return lan78xx_update_reg(dev, USB_CFG1,
2361 					  USB_CFG1_DEV_U1_INIT_EN_,
2362 					  USB_CFG1_DEV_U1_INIT_EN_);
2363 
2364 	case SPEED_100:
2365 	case SPEED_10:
2366 		/* Enable both U1 and U2 */
2367 		mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2368 		val = mask;
2369 		return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2370 
2371 	default:
2372 		netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2373 		return -EINVAL;
2374 	}
2375 }
2376 
2377 /**
2378  * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2379  * @dev: pointer to the LAN78xx device structure
2380  * @tx_pause: enable transmission of pause frames
2381  * @rx_pause: enable reception of pause frames
2382  *
2383  * This function configures the LAN78xx flow control settings by writing
2384  * to the FLOW and FCT_FLOW registers. The pause time is set to the
2385  * maximum allowed value (65535 quanta). FIFO thresholds are selected
2386  * based on USB speed.
2387  *
2388  * The Pause Time field is measured in units of 512-bit times (quanta):
2389  *   - At 1 Gbps: 1 quantum = 512 ns → max ~33.6 ms pause
2390  *   - At 100 Mbps: 1 quantum = 5.12 µs → max ~335 ms pause
2391  *   - At 10 Mbps: 1 quantum = 51.2 µs → max ~3.3 s pause
2392  *
2393  * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2394  *   - RXUSED is the number of bytes used in the RX FIFO
2395  *   - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2396  *   - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2397  *   - Both thresholds are encoded in units of 512 bytes (rounded up)
2398  *
2399  * Thresholds differ by USB speed because available USB bandwidth
2400  * affects how fast packets can be drained from the RX FIFO:
2401  *   - USB 3.x (SuperSpeed):
2402  *       FLOW_ON  = 9216 bytes → 18 units
2403  *       FLOW_OFF = 4096 bytes →  8 units
2404  *   - USB 2.0 (High-Speed):
2405  *       FLOW_ON  = 8704 bytes → 17 units
2406  *       FLOW_OFF = 1024 bytes →  2 units
2407  *
2408  * Note: The FCT_FLOW register must be configured before enabling TX pause
2409  *       (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2410  *
2411  * Return: 0 on success or a negative error code on failure.
2412  */
2413 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2414 					 bool tx_pause, bool rx_pause)
2415 {
2416 	/* Use maximum pause time: 65535 quanta (512-bit times) */
2417 	const u32 pause_time_quanta = 65535;
2418 	u32 fct_flow = 0;
2419 	u32 flow = 0;
2420 	int ret;
2421 
2422 	/* Prepare MAC flow control bits */
2423 	if (tx_pause)
2424 		flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2425 
2426 	if (rx_pause)
2427 		flow |= FLOW_CR_RX_FCEN_;
2428 
2429 	/* Select RX FIFO thresholds based on USB speed
2430 	 *
2431 	 * FCT_FLOW layout:
2432 	 *   bits [6:0]   FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2433 	 *   bits [14:8]  FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2434 	 *   thresholds are expressed in units of 512 bytes
2435 	 */
2436 	switch (dev->udev->speed) {
2437 	case USB_SPEED_SUPER:
2438 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2439 		break;
2440 	case USB_SPEED_HIGH:
2441 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2442 		break;
2443 	default:
2444 		netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2445 			    dev->udev->speed);
2446 		return -EINVAL;
2447 	}
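	/* Illustrative SuperSpeed encoding, using the values from the
	 * kernel-doc above: FLOW_ON 9216 bytes -> 18 units and FLOW_OFF
	 * 4096 bytes -> 8 units, so fct_flow = (18 << 0) | (8 << 8) = 0x0812.
	 */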
2448 
2449 	/* Step 1: Write FIFO thresholds before enabling pause frames */
2450 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2451 	if (ret < 0)
2452 		return ret;
2453 
2454 	/* Step 2: Enable MAC pause functionality */
2455 	return lan78xx_write_reg(dev, FLOW, flow);
2456 }
2457 
2458 static void lan78xx_mac_link_up(struct phylink_config *config,
2459 				struct phy_device *phy,
2460 				unsigned int mode, phy_interface_t interface,
2461 				int speed, int duplex,
2462 				bool tx_pause, bool rx_pause)
2463 {
2464 	struct net_device *net = to_net_dev(config->dev);
2465 	struct lan78xx_net *dev = netdev_priv(net);
2466 	u32 mac_cr = 0;
2467 	int ret;
2468 
2469 	switch (speed) {
2470 	case SPEED_1000:
2471 		mac_cr |= MAC_CR_SPEED_1000_;
2472 		break;
2473 	case SPEED_100:
2474 		mac_cr |= MAC_CR_SPEED_100_;
2475 		break;
2476 	case SPEED_10:
2477 		mac_cr |= MAC_CR_SPEED_10_;
2478 		break;
2479 	default:
2480 		netdev_err(dev->net, "Unsupported speed %d\n", speed);
2481 		return;
2482 	}
2483 
2484 	if (duplex == DUPLEX_FULL)
2485 		mac_cr |= MAC_CR_FULL_DUPLEX_;
2486 
2487 	/* make sure TXEN and RXEN are disabled before reconfiguring MAC */
2488 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ |
2489 				 MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr);
2490 	if (ret < 0)
2491 		goto link_up_fail;
2492 
2493 	ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause);
2494 	if (ret < 0)
2495 		goto link_up_fail;
2496 
2497 	ret = lan78xx_configure_usb(dev, speed);
2498 	if (ret < 0)
2499 		goto link_up_fail;
2500 
2501 	lan78xx_rx_urb_submit_all(dev);
2502 
2503 	ret = lan78xx_flush_rx_fifo(dev);
2504 	if (ret < 0)
2505 		goto link_up_fail;
2506 
2507 	ret = lan78xx_flush_tx_fifo(dev);
2508 	if (ret < 0)
2509 		goto link_up_fail;
2510 
2511 	ret = lan78xx_start_tx_path(dev);
2512 	if (ret < 0)
2513 		goto link_up_fail;
2514 
2515 	ret = lan78xx_start_rx_path(dev);
2516 	if (ret < 0)
2517 		goto link_up_fail;
2518 
2519 	netif_start_queue(net);
2520 
2521 	return;
2522 
2523 link_up_fail:
2524 	netdev_err(dev->net, "Failed to set MAC up with error %pe\n",
2525 		   ERR_PTR(ret));
2526 }
2527 
2528 /**
2529  * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support
2530  * @dev: LAN78xx device
2531  * @enable: true to enable EEE, false to disable
2532  *
2533  * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy
2534  * Efficient Ethernet (EEE) operation. According to current understanding
2535  * of the LAN7800 documentation, this bit can be modified while TX and RX
2536  * are enabled. No explicit requirement was found to disable data paths
2537  * before changing this bit.
2538  *
2539  * Return: 0 on success or a negative error code
2540  */
2541 static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable)
2542 {
2543 	u32 mac_cr = 0;
2544 
2545 	if (enable)
2546 		mac_cr |= MAC_CR_EEE_EN_;
2547 
2548 	return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr);
2549 }
2550 
2551 static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config)
2552 {
2553 	struct net_device *net = to_net_dev(config->dev);
2554 	struct lan78xx_net *dev = netdev_priv(net);
2555 
2556 	lan78xx_mac_eee_enable(dev, false);
2557 }
2558 
2559 static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
2560 				     bool tx_clk_stop)
2561 {
2562 	struct net_device *net = to_net_dev(config->dev);
2563 	struct lan78xx_net *dev = netdev_priv(net);
2564 	int ret;
2565 
2566 	/* Software should only change this field when Energy Efficient
2567 	 * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
2568 	 * EEEEN during probe, and phylink itself guarantees that
2569 	 * mac_disable_tx_lpi() will have been previously called.
2570 	 */
2571 	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer);
2572 	if (ret < 0)
2573 		return ret;
2574 
2575 	return lan78xx_mac_eee_enable(dev, true);
2576 }
2577 
2578 static const struct phylink_mac_ops lan78xx_phylink_mac_ops = {
2579 	.mac_config = lan78xx_mac_config,
2580 	.mac_link_down = lan78xx_mac_link_down,
2581 	.mac_link_up = lan78xx_mac_link_up,
2582 	.mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi,
2583 	.mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi,
2584 };
2585 
2586 /**
2587  * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801
2588  * @dev: LAN78xx device
2589  *
2590  * Use fixed link configuration with 1 Gbps full duplex. This is used in special
2591  * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface
2592  * to a switch without a visible PHY.
2593  *
2594  * Return: 0 on success or a negative error code.
2595  */
2596 static int lan78xx_set_fixed_link(struct lan78xx_net *dev)
2597 {
2598 	static const struct phylink_link_state state = {
2599 		.speed = SPEED_1000,
2600 		.duplex = DUPLEX_FULL,
2601 	};
2602 
2603 	netdev_info(dev->net,
2604 		    "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n");
2605 
2606 	return phylink_set_fixed_link(dev->phylink, &state);
2607 }
2608 
2609 /**
2610  * lan78xx_get_phy() - Probe or register PHY device and set interface mode
2611  * @dev: LAN78xx device structure
2612  * This function attempts to find a PHY on the MDIO bus. If none is found
2613  * and the chip is LAN7801, it returns NULL so the caller can fall back to
2614  * a fixed link. It also sets dev->interface based on chip ID and PHY type.
2615  *
2616  * Return: a valid PHY device pointer, NULL (fixed-link fallback), or ERR_PTR() on failure.
2617  * Return: a valid PHY device pointer, or ERR_PTR() on failure.
2618  */
2619 static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
2620 {
2621 	struct phy_device *phydev;
2622 
2623 	/* Attempt to locate a PHY on the MDIO bus */
2624 	phydev = phy_find_first(dev->mdiobus);
2625 
2626 	switch (dev->chipid) {
2627 	case ID_REV_CHIP_ID_7801_:
2628 		if (phydev) {
2629 			/* External RGMII PHY detected */
2630 			dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2631 			phydev->is_internal = false;
2632 
2633 			if (!phydev->drv)
2634 				netdev_warn(dev->net,
2635 					    "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
2636 
2637 			return phydev;
2638 		}
2639 
2640 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2641 		/* No PHY found – fall back to a fixed link (e.g. KSZ switch board) */
2642 		return NULL;
2643 
2644 	case ID_REV_CHIP_ID_7800_:
2645 	case ID_REV_CHIP_ID_7850_:
2646 		if (!phydev)
2647 			return ERR_PTR(-ENODEV);
2648 
2649 		/* These use internal GMII-connected PHY */
2650 		dev->interface = PHY_INTERFACE_MODE_GMII;
2651 		phydev->is_internal = true;
2652 		return phydev;
2653 
2654 	default:
2655 		netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
2656 		return ERR_PTR(-ENODEV);
2657 	}
2658 }
2659 
2660 /**
2661  * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
2662  * @dev: LAN78xx device
2663  *
2664  * Configure MAC-side registers according to dev->interface, which should be
2665  * set by lan78xx_get_phy().
2666  *
2667  * - For PHY_INTERFACE_MODE_RGMII:
2668  *   Enable MAC-side TXC delay. This mode seems to be used in a special setup
2669  *   without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
2670  *   connected to the KSZ9897 switch, and the link timing is expected to be
2671  *   hardwired (e.g. via strapping or board layout). No devicetree support is
2672  *   assumed here.
2673  *
2674  * - For PHY_INTERFACE_MODE_RGMII_ID:
2675  *   Disable MAC-side delay and rely on the PHY driver to provide delay.
2676  *
2677  * - For GMII, no MAC-specific config is needed.
2678  *
2679  * Return: 0 on success or a negative error code.
2680  */
2681 static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
2682 {
2683 	int ret;
2684 
2685 	switch (dev->interface) {
2686 	case PHY_INTERFACE_MODE_RGMII:
2687 		/* Enable MAC-side TX clock delay */
2688 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2689 					MAC_RGMII_ID_TXC_DELAY_EN_);
2690 		if (ret < 0)
2691 			return ret;
2692 
2693 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2694 		if (ret < 0)
2695 			return ret;
2696 
2697 		ret = lan78xx_update_reg(dev, HW_CFG,
2698 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
2699 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
2700 		if (ret < 0)
2701 			return ret;
2702 
2703 		break;
2704 
2705 	case PHY_INTERFACE_MODE_RGMII_ID:
2706 		/* Disable MAC-side TXC delay, PHY provides it */
2707 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2708 		if (ret < 0)
2709 			return ret;
2710 
2711 		break;
2712 
2713 	case PHY_INTERFACE_MODE_GMII:
2714 		/* No MAC-specific configuration required */
2715 		break;
2716 
2717 	default:
2718 		netdev_warn(dev->net, "Unsupported interface mode: %d\n",
2719 			    dev->interface);
2720 		break;
2721 	}
2722 
2723 	return 0;
2724 }
2725 
2726 /**
2727  * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2728  * @dev: LAN78xx device
2729  * @phydev: PHY device (must be valid)
2730  *
2731  * Reads "microchip,led-modes" property from the PHY's DT node and enables
2732  * the corresponding number of LEDs by writing to HW_CFG.
2733  *
2734  * This helper preserves the original logic, enabling up to 4 LEDs.
2735  * If the property is not present, this function does nothing.
2736  *
2737  * Return: 0 on success or a negative error code.
2738  */
2739 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2740 					  struct phy_device *phydev)
2741 {
2742 	struct device_node *np = phydev->mdio.dev.of_node;
2743 	u32 reg;
2744 	int len, ret;
2745 
2746 	if (!np)
2747 		return 0;
2748 
2749 	len = of_property_count_elems_of_size(np, "microchip,led-modes",
2750 					      sizeof(u32));
2751 	if (len < 0)
2752 		return 0;
2753 
2754 	ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2755 	if (ret < 0)
2756 		return ret;
2757 
2758 	reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2759 		 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2760 
2761 	reg |= (len > 0) * HW_CFG_LED0_EN_ |
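	/* Each (len > n) comparison below evaluates to 0 or 1, so the
	 * multiplications enable LED0..LED(len-1); e.g. len == 2 sets
	 * HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_.
	 */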
2762 	       (len > 1) * HW_CFG_LED1_EN_ |
2763 	       (len > 2) * HW_CFG_LED2_EN_ |
2764 	       (len > 3) * HW_CFG_LED3_EN_;
2765 
2766 	return lan78xx_write_reg(dev, HW_CFG, reg);
2767 }
2768 
2769 static int lan78xx_phylink_setup(struct lan78xx_net *dev)
2770 {
2771 	struct phylink_config *pc = &dev->phylink_config;
2772 	struct phylink *phylink;
2773 
2774 	pc->dev = &dev->net->dev;
2775 	pc->type = PHYLINK_NETDEV;
2776 	pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 |
2777 			       MAC_100 | MAC_1000FD;
2778 	pc->mac_managed_pm = true;
2779 	pc->lpi_capabilities = MAC_100FD | MAC_1000FD;
2780 	/*
2781 	 * Default TX LPI (Low Power Idle) request delay count is set to 50us.
2782 	 *
2783 	 * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204.
2784 	 *
2785 	 * Reasoning:
2786 	 * According to the application note in the LAN7800 documentation, a
2787 	 * zero delay may negatively impact the TX data path’s ability to
2788 	 * support Gigabit operation. A value of 50us is recommended as a
2789 	 * reasonable default when the part operates at Gigabit speeds,
2790 	 * balancing stability and power efficiency in EEE mode. This delay can
2791 	 * be increased based on performance testing, as EEE is designed for
2792 	 * scenarios with mostly idle links and occasional bursts of full
2793 	 * bandwidth transmission. The goal is to ensure reliable Gigabit
2794 	 * performance without overly aggressive power optimization during
2795 	 * inactive periods.
2796 	 */
2797 	pc->lpi_timer_default = 50;
2798 	pc->eee_enabled_default = true;
2799 
2800 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2801 		phy_interface_set_rgmii(pc->supported_interfaces);
2802 	else
2803 		__set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces);
2804 
2805 	memcpy(dev->phylink_config.lpi_interfaces,
2806 	       dev->phylink_config.supported_interfaces,
2807 	       sizeof(dev->phylink_config.lpi_interfaces));
2808 
2809 	phylink = phylink_create(pc, dev->net->dev.fwnode,
2810 				 dev->interface, &lan78xx_phylink_mac_ops);
2811 	if (IS_ERR(phylink))
2812 		return PTR_ERR(phylink);
2813 
2814 	dev->phylink = phylink;
2815 
2816 	return 0;
2817 }
2818 
2819 static void lan78xx_phy_uninit(struct lan78xx_net *dev)
2820 {
2821 	if (dev->phylink) {
2822 		phylink_disconnect_phy(dev->phylink);
2823 		phylink_destroy(dev->phylink);
2824 		dev->phylink = NULL;
2825 	}
2826 }
2827 
2828 static int lan78xx_phy_init(struct lan78xx_net *dev)
2829 {
2830 	struct phy_device *phydev;
2831 	int ret;
2832 
2833 	phydev = lan78xx_get_phy(dev);
2834 	/* phydev can be NULL if no PHY is found and the chip is LAN7801,
2835 	 * which will use a fixed link later.
2836 	 * If an error occurs, return the error code immediately.
2837 	 */
2838 	if (IS_ERR(phydev))
2839 		return PTR_ERR(phydev);
2840 
2841 	ret = lan78xx_phylink_setup(dev);
2842 	if (ret < 0)
2843 		return ret;
2844 
2845 	ret = lan78xx_mac_prepare_for_phy(dev);
2846 	if (ret < 0)
2847 		goto phylink_uninit;
2848 
2849 	/* If no PHY is found, set up a fixed link. It is very specific to
2850 	 * the LAN7801 and is used in special cases like EVB-KSZ9897-1 where
2851 	 * LAN7801 acts as a USB-to-Ethernet interface to a switch without
2852 	 * a visible PHY.
2853 	 */
2854 	if (!phydev) {
2855 		ret = lan78xx_set_fixed_link(dev);
2856 		if (ret < 0)
2857 			goto phylink_uninit;
2858 
2859 		/* The fixed link is already set up, so return early; there
2860 		 * is no PHY IRQ to configure and nothing to attach to phylink.
2861 		 */
2862 		return 0;
2863 	}
2864 
2865 	/* if phyirq is not set, use polling mode in phylib */
2866 	if (dev->domain_data.phyirq > 0)
2867 		phydev->irq = dev->domain_data.phyirq;
2868 	else
2869 		phydev->irq = PHY_POLL;
2870 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2871 
2872 	ret = phylink_connect_phy(dev->phylink, phydev);
2873 	if (ret) {
2874 		netdev_err(dev->net, "can't attach PHY to %s, error %pe\n",
2875 			   dev->mdiobus->id, ERR_PTR(ret));
2876 		goto phylink_uninit;
2877 	}
2878 
2879 	ret = lan78xx_configure_leds_from_dt(dev, phydev);
2880 	if (ret < 0)
2881 		goto phylink_uninit;
2882 
2883 	return 0;
2884 
2885 phylink_uninit:
2886 	lan78xx_phy_uninit(dev);
2887 
2888 	return ret;
2889 }
2890 
2891 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2892 {
2893 	bool rxenabled;
2894 	u32 buf;
2895 	int ret;
2896 
2897 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2898 	if (ret < 0)
2899 		return ret;
2900 
2901 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2902 
2903 	if (rxenabled) {
2904 		buf &= ~MAC_RX_RXEN_;
2905 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2906 		if (ret < 0)
2907 			return ret;
2908 	}
2909 
2910 	/* add 4 to size for FCS */
2911 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2912 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2913 
2914 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2915 	if (ret < 0)
2916 		return ret;
2917 
2918 	if (rxenabled) {
2919 		buf |= MAC_RX_RXEN_;
2920 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2921 		if (ret < 0)
2922 			return ret;
2923 	}
2924 
2925 	return 0;
2926 }
2927 
2928 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2929 {
2930 	struct sk_buff *skb;
2931 	unsigned long flags;
2932 	int count = 0;
2933 
2934 	spin_lock_irqsave(&q->lock, flags);
2935 	while (!skb_queue_empty(q)) {
2936 		struct skb_data	*entry;
2937 		struct urb *urb;
2938 		int ret;
2939 
2940 		skb_queue_walk(q, skb) {
2941 			entry = (struct skb_data *)skb->cb;
2942 			if (entry->state != unlink_start)
2943 				goto found;
2944 		}
2945 		break;
2946 found:
2947 		entry->state = unlink_start;
2948 		urb = entry->urb;
2949 
2950 		/* Take a reference on the URB so it cannot be freed
2951 		 * while usb_unlink_urb() runs; otherwise usb_unlink_urb()
2952 		 * could trigger a use-after-free, since it is always
2953 		 * racing with the .complete handler (including
2954 		 * defer_bh).
2955 		 */
2956 		usb_get_urb(urb);
2957 		spin_unlock_irqrestore(&q->lock, flags);
2958 		/* during some PM-driven resume scenarios,
2959 		 * these (async) unlinks complete immediately
2960 		 */
2961 		ret = usb_unlink_urb(urb);
2962 		if (ret != -EINPROGRESS && ret != 0)
2963 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2964 		else
2965 			count++;
2966 		usb_put_urb(urb);
2967 		spin_lock_irqsave(&q->lock, flags);
2968 	}
2969 	spin_unlock_irqrestore(&q->lock, flags);
2970 	return count;
2971 }
2972 
2973 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2974 {
2975 	struct lan78xx_net *dev = netdev_priv(netdev);
2976 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2977 	int ret;
2978 
2979 	/* no second zero-length packet read wanted after mtu-sized packets */
2980 	if ((max_frame_len % dev->maxpacket) == 0)
2981 		return -EDOM;
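	/* A bulk IN transfer that is an exact multiple of the endpoint
	 * max packet size would need a trailing zero-length packet to
	 * terminate, which the check above avoids.
	 */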
2982 
2983 	ret = usb_autopm_get_interface(dev->intf);
2984 	if (ret < 0)
2985 		return ret;
2986 
2987 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2988 	if (ret < 0)
2989 		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2990 			   new_mtu, netdev->mtu, ERR_PTR(ret));
2991 	else
2992 		WRITE_ONCE(netdev->mtu, new_mtu);
2993 
2994 	usb_autopm_put_interface(dev->intf);
2995 
2996 	return ret;
2997 }
2998 
2999 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
3000 {
3001 	struct lan78xx_net *dev = netdev_priv(netdev);
3002 	struct sockaddr *addr = p;
3003 	u32 addr_lo, addr_hi;
3004 	int ret;
3005 
3006 	if (netif_running(netdev))
3007 		return -EBUSY;
3008 
3009 	if (!is_valid_ether_addr(addr->sa_data))
3010 		return -EADDRNOTAVAIL;
3011 
3012 	eth_hw_addr_set(netdev, addr->sa_data);
3013 
3014 	addr_lo = netdev->dev_addr[0] |
3015 		  netdev->dev_addr[1] << 8 |
3016 		  netdev->dev_addr[2] << 16 |
3017 		  netdev->dev_addr[3] << 24;
3018 	addr_hi = netdev->dev_addr[4] |
3019 		  netdev->dev_addr[5] << 8;
3020 
3021 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
3022 	if (ret < 0)
3023 		return ret;
3024 
3025 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
3026 	if (ret < 0)
3027 		return ret;
3028 
3029 	/* Keep perfect-match filter entry 0 in sync with the new address */
3030 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
3031 	if (ret < 0)
3032 		return ret;
3033 
3034 	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
3035 }
3036 
3037 /* Enable or disable Rx checksum offload engine */
3038 static int lan78xx_set_features(struct net_device *netdev,
3039 				netdev_features_t features)
3040 {
3041 	struct lan78xx_net *dev = netdev_priv(netdev);
3042 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3043 	unsigned long flags;
3044 
3045 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3046 
3047 	if (features & NETIF_F_RXCSUM) {
3048 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3049 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3050 	} else {
3051 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3052 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3053 	}
3054 
3055 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
3056 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3057 	else
3058 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3059 
3060 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3061 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3062 	else
3063 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3064 
3065 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3066 
3067 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3068 }
3069 
3070 static void lan78xx_deferred_vlan_write(struct work_struct *param)
3071 {
3072 	struct lan78xx_priv *pdata =
3073 			container_of(param, struct lan78xx_priv, set_vlan);
3074 	struct lan78xx_net *dev = pdata->dev;
3075 
3076 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
3077 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
3078 }
3079 
3080 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
3081 				   __be16 proto, u16 vid)
3082 {
3083 	struct lan78xx_net *dev = netdev_priv(netdev);
3084 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3085 	u16 vid_bit_index;
3086 	u16 vid_dword_index;
3087 
3088 	vid_dword_index = (vid >> 5) & 0x7F;
3089 	vid_bit_index = vid & 0x1F;
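	/* e.g. VID 100: dword index 100 >> 5 = 3, bit index 100 & 0x1F = 4 */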
3090 
3091 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
3092 
3093 	/* defer register writes to a sleepable context */
3094 	schedule_work(&pdata->set_vlan);
3095 
3096 	return 0;
3097 }
3098 
3099 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
3100 				    __be16 proto, u16 vid)
3101 {
3102 	struct lan78xx_net *dev = netdev_priv(netdev);
3103 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3104 	u16 vid_bit_index;
3105 	u16 vid_dword_index;
3106 
3107 	vid_dword_index = (vid >> 5) & 0x7F;
3108 	vid_bit_index = vid & 0x1F;
3109 
3110 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
3111 
3112 	/* defer register writes to a sleepable context */
3113 	schedule_work(&pdata->set_vlan);
3114 
3115 	return 0;
3116 }
3117 
3118 static int lan78xx_init_ltm(struct lan78xx_net *dev)
3119 {
3120 	u32 regs[6] = { 0 };
3121 	int ret;
3122 	u32 buf;
3123 
3124 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
3125 	if (ret < 0)
3126 		goto init_ltm_failed;
3127 
3128 	if (buf & USB_CFG1_LTM_ENABLE_) {
3129 		u8 temp[2];
3130 		/* Get values from EEPROM first */
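		/* The descriptor at EEPROM/OTP offset 0x3F is two bytes:
		 * byte 0 is the blob length (24 = six u32 LTM registers)
		 * and byte 1 is the word offset of the blob, hence the
		 * temp[1] * 2 byte address below.
		 */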
3131 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
3132 			if (temp[0] == 24) {
3133 				ret = lan78xx_read_raw_eeprom(dev,
3134 							      temp[1] * 2,
3135 							      24,
3136 							      (u8 *)regs);
3137 				if (ret < 0)
3138 					return ret;
3139 			}
3140 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
3141 			if (temp[0] == 24) {
3142 				ret = lan78xx_read_raw_otp(dev,
3143 							   temp[1] * 2,
3144 							   24,
3145 							   (u8 *)regs);
3146 				if (ret < 0)
3147 					return ret;
3148 			}
3149 		}
3150 	}
3151 
3152 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
3153 	if (ret < 0)
3154 		goto init_ltm_failed;
3155 
3156 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
3157 	if (ret < 0)
3158 		goto init_ltm_failed;
3159 
3160 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
3161 	if (ret < 0)
3162 		goto init_ltm_failed;
3163 
3164 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
3165 	if (ret < 0)
3166 		goto init_ltm_failed;
3167 
3168 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
3169 	if (ret < 0)
3170 		goto init_ltm_failed;
3171 
3172 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
3173 	if (ret < 0)
3174 		goto init_ltm_failed;
3175 
3176 	return 0;
3177 
3178 init_ltm_failed:
3179 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
3180 	return ret;
3181 }
3182 
3183 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3184 {
3185 	int result = 0;
3186 
3187 	switch (dev->udev->speed) {
3188 	case USB_SPEED_SUPER:
3189 		dev->rx_urb_size = RX_SS_URB_SIZE;
3190 		dev->tx_urb_size = TX_SS_URB_SIZE;
3191 		dev->n_rx_urbs = RX_SS_URB_NUM;
3192 		dev->n_tx_urbs = TX_SS_URB_NUM;
3193 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
3194 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3195 		break;
3196 	case USB_SPEED_HIGH:
3197 		dev->rx_urb_size = RX_HS_URB_SIZE;
3198 		dev->tx_urb_size = TX_HS_URB_SIZE;
3199 		dev->n_rx_urbs = RX_HS_URB_NUM;
3200 		dev->n_tx_urbs = TX_HS_URB_NUM;
3201 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
3202 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3203 		break;
3204 	case USB_SPEED_FULL:
3205 		dev->rx_urb_size = RX_FS_URB_SIZE;
3206 		dev->tx_urb_size = TX_FS_URB_SIZE;
3207 		dev->n_rx_urbs = RX_FS_URB_NUM;
3208 		dev->n_tx_urbs = TX_FS_URB_NUM;
3209 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
3210 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3211 		break;
3212 	default:
3213 		netdev_warn(dev->net, "USB bus speed not supported\n");
3214 		result = -EIO;
3215 		break;
3216 	}
3217 
3218 	return result;
3219 }
3220 
3221 static int lan78xx_reset(struct lan78xx_net *dev)
3222 {
3223 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3224 	unsigned long timeout;
3225 	int ret;
3226 	u32 buf;
3227 
3228 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3229 	if (ret < 0)
3230 		return ret;
3231 
3232 	buf |= HW_CFG_LRST_;
3233 
3234 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3235 	if (ret < 0)
3236 		return ret;
3237 
3238 	timeout = jiffies + HZ;
3239 	do {
3240 		mdelay(1);
3241 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3242 		if (ret < 0)
3243 			return ret;
3244 
3245 		if (time_after(jiffies, timeout)) {
3246 			netdev_warn(dev->net,
3247 				    "timeout on completion of LiteReset");
3248 			ret = -ETIMEDOUT;
3249 			return ret;
3250 		}
3251 	} while (buf & HW_CFG_LRST_);
3252 
3253 	ret = lan78xx_init_mac_address(dev);
3254 	if (ret < 0)
3255 		return ret;
3256 
3257 	/* save DEVID for later usage */
3258 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
3259 	if (ret < 0)
3260 		return ret;
3261 
3262 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
3263 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
3264 
3265 	/* Respond to the IN token with a NAK */
3266 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3267 	if (ret < 0)
3268 		return ret;
3269 
3270 	buf |= USB_CFG_BIR_;
3271 
3272 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3273 	if (ret < 0)
3274 		return ret;
3275 
3276 	/* Init LTM */
3277 	ret = lan78xx_init_ltm(dev);
3278 	if (ret < 0)
3279 		return ret;
3280 
3281 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
3282 	if (ret < 0)
3283 		return ret;
3284 
3285 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
3286 	if (ret < 0)
3287 		return ret;
3288 
3289 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3290 	if (ret < 0)
3291 		return ret;
3292 
3293 	buf |= HW_CFG_MEF_;
3294 	buf |= HW_CFG_CLK125_EN_;
3295 	buf |= HW_CFG_REFCLK25_EN_;
3296 
3297 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3298 	if (ret < 0)
3299 		return ret;
3300 
3301 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3302 	if (ret < 0)
3303 		return ret;
3304 
3305 	buf |= USB_CFG_BCE_;
3306 
3307 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3308 	if (ret < 0)
3309 		return ret;
3310 
3311 	/* set FIFO sizes */
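	/* The FCT_*_FIFO_END registers hold the index of the FIFO's last
	 * 512-byte block, i.e. size / 512 - 1; for the 12 KiB FIFOs used
	 * here: (12288 - 512) / 512 = 23 (0x17).
	 */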
3312 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
3313 
3314 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
3315 	if (ret < 0)
3316 		return ret;
3317 
3318 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
3319 
3320 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
3321 	if (ret < 0)
3322 		return ret;
3323 
3324 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3325 	if (ret < 0)
3326 		return ret;
3327 
3328 	ret = lan78xx_write_reg(dev, FLOW, 0);
3329 	if (ret < 0)
3330 		return ret;
3331 
3332 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3333 	if (ret < 0)
3334 		return ret;
3335 
3336 	/* Don't need rfe_ctl_lock during initialisation */
3337 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3338 	if (ret < 0)
3339 		return ret;
3340 
3341 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3342 
3343 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3344 	if (ret < 0)
3345 		return ret;
3346 
3347 	/* Enable or disable checksum offload engines */
3348 	ret = lan78xx_set_features(dev->net, dev->net->features);
3349 	if (ret < 0)
3350 		return ret;
3351 
3352 	lan78xx_set_multicast(dev->net);
3353 
3354 	/* reset PHY */
3355 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3356 	if (ret < 0)
3357 		return ret;
3358 
3359 	buf |= PMT_CTL_PHY_RST_;
3360 
3361 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3362 	if (ret < 0)
3363 		return ret;
3364 
3365 	timeout = jiffies + HZ;
3366 	do {
3367 		mdelay(1);
3368 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3369 		if (ret < 0)
3370 			return ret;
3371 
3372 		if (time_after(jiffies, timeout)) {
3373 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3374 			ret = -ETIMEDOUT;
3375 			return ret;
3376 		}
3377 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3378 
3379 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3380 	if (ret < 0)
3381 		return ret;
3382 
3383 	buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_);
3384 
3385 	/* LAN7801 only has RGMII mode */
3386 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
3387 		buf &= ~MAC_CR_GMII_EN_;
3388 
3389 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3390 	if (ret < 0)
3391 		return ret;
3392 
3393 	ret = lan78xx_set_rx_max_frame_length(dev,
3394 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3395 
3396 	return ret;
3397 }
3398 
3399 static void lan78xx_init_stats(struct lan78xx_net *dev)
3400 {
3401 	u32 *p;
3402 	int i;
3403 
3404 	/* initialize the rollover limits for the stats update;
3405 	 * some counters are 20 bits wide and some are 32 bits
3406 	 */
3407 	p = (u32 *)&dev->stats.rollover_max;
3408 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3409 		p[i] = 0xFFFFF;
3410 
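	/* 0xFFFFF is 2^20 - 1, the rollover limit of the 20-bit counters;
	 * the byte and LPI time counters below are full 32-bit counters.
	 */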
3411 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3412 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3413 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3414 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3415 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3416 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3417 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3418 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3419 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3420 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3421 
3422 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3423 }
3424 
3425 static int lan78xx_open(struct net_device *net)
3426 {
3427 	struct lan78xx_net *dev = netdev_priv(net);
3428 	int ret;
3429 
3430 	netif_dbg(dev, ifup, dev->net, "open device");
3431 
3432 	ret = usb_autopm_get_interface(dev->intf);
3433 	if (ret < 0)
3434 		return ret;
3435 
3436 	mutex_lock(&dev->dev_mutex);
3437 
3438 	lan78xx_init_stats(dev);
3439 
3440 	napi_enable(&dev->napi);
3441 
3442 	set_bit(EVENT_DEV_OPEN, &dev->flags);
3443 
3444 	/* for Link Check */
3445 	if (dev->urb_intr) {
3446 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3447 		if (ret < 0) {
3448 			netif_err(dev, ifup, dev->net,
3449 				  "intr submit %d\n", ret);
3450 			goto done;
3451 		}
3452 	}
3453 
3454 	phylink_start(dev->phylink);
3455 
3456 done:
3457 	mutex_unlock(&dev->dev_mutex);
3458 
3459 	if (ret < 0)
3460 		usb_autopm_put_interface(dev->intf);
3461 
3462 	return ret;
3463 }
3464 
3465 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3466 {
3467 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3468 	DECLARE_WAITQUEUE(wait, current);
3469 	int temp;
3470 
3471 	/* ensure there are no more active urbs */
3472 	add_wait_queue(&unlink_wakeup, &wait);
3473 	set_current_state(TASK_UNINTERRUPTIBLE);
3474 	dev->wait = &unlink_wakeup;
3475 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3476 
3477 	/* maybe wait for deletions to finish. */
3478 	while (!skb_queue_empty(&dev->rxq) ||
3479 	       !skb_queue_empty(&dev->txq)) {
3480 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3481 		set_current_state(TASK_UNINTERRUPTIBLE);
3482 		netif_dbg(dev, ifdown, dev->net,
3483 			  "waited for %d urb completions", temp);
3484 	}
3485 	set_current_state(TASK_RUNNING);
3486 	dev->wait = NULL;
3487 	remove_wait_queue(&unlink_wakeup, &wait);
3488 
3489 	/* empty Rx done, Rx overflow and Tx pend queues
3490 	 */
3491 	while (!skb_queue_empty(&dev->rxq_done)) {
3492 		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3493 
3494 		lan78xx_release_rx_buf(dev, skb);
3495 	}
3496 
3497 	skb_queue_purge(&dev->rxq_overflow);
3498 	skb_queue_purge(&dev->txq_pend);
3499 }
3500 
3501 static int lan78xx_stop(struct net_device *net)
3502 {
3503 	struct lan78xx_net *dev = netdev_priv(net);
3504 
3505 	netif_dbg(dev, ifup, dev->net, "stop device");
3506 
3507 	mutex_lock(&dev->dev_mutex);
3508 
3509 	if (timer_pending(&dev->stat_monitor))
3510 		timer_delete_sync(&dev->stat_monitor);
3511 
3512 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3513 	napi_disable(&dev->napi);
3514 
3515 	lan78xx_terminate_urbs(dev);
3516 
3517 	netif_info(dev, ifdown, dev->net,
3518 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3519 		   net->stats.rx_packets, net->stats.tx_packets,
3520 		   net->stats.rx_errors, net->stats.tx_errors);
3521 
3522 	phylink_stop(dev->phylink);
3523 
3524 	usb_kill_urb(dev->urb_intr);
3525 
3526 	/* deferred work (task, timer, softirq) must also stop.
3527 	 * can't flush_scheduled_work() until we drop rtnl (later),
3528 	 * else workers could deadlock; so make workers a NOP.
3529 	 */
3530 	clear_bit(EVENT_TX_HALT, &dev->flags);
3531 	clear_bit(EVENT_RX_HALT, &dev->flags);
3532 	clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
3533 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3534 
3535 	cancel_delayed_work_sync(&dev->wq);
3536 
3537 	usb_autopm_put_interface(dev->intf);
3538 
3539 	mutex_unlock(&dev->dev_mutex);
3540 
3541 	return 0;
3542 }
3543 
3544 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3545 			       struct sk_buff_head *list, enum skb_state state)
3546 {
3547 	unsigned long flags;
3548 	enum skb_state old_state;
3549 	struct skb_data *entry = (struct skb_data *)skb->cb;
3550 
3551 	spin_lock_irqsave(&list->lock, flags);
3552 	old_state = entry->state;
3553 	entry->state = state;
3554 
3555 	__skb_unlink(skb, list);
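	/* Hand-over-hand locking: interrupts stay disabled (flags saved
	 * above) while the lock moves from the source list to rxq_done,
	 * so the skb is never visible on both queues at once.
	 */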
3556 	spin_unlock(&list->lock);
3557 	spin_lock(&dev->rxq_done.lock);
3558 
3559 	__skb_queue_tail(&dev->rxq_done, skb);
3560 	if (skb_queue_len(&dev->rxq_done) == 1)
3561 		napi_schedule(&dev->napi);
3562 
3563 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3564 
3565 	return old_state;
3566 }
3567 
3568 static void tx_complete(struct urb *urb)
3569 {
3570 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3571 	struct skb_data *entry = (struct skb_data *)skb->cb;
3572 	struct lan78xx_net *dev = entry->dev;
3573 
3574 	if (urb->status == 0) {
3575 		dev->net->stats.tx_packets += entry->num_of_packet;
3576 		dev->net->stats.tx_bytes += entry->length;
3577 	} else {
3578 		dev->net->stats.tx_errors += entry->num_of_packet;
3579 
3580 		switch (urb->status) {
3581 		case -EPIPE:
3582 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3583 			break;
3584 
3585 		/* software-driven interface shutdown */
3586 		case -ECONNRESET:
3587 		case -ESHUTDOWN:
3588 			netif_dbg(dev, tx_err, dev->net,
3589 				  "tx err interface gone %d\n",
3590 				  entry->urb->status);
3591 			break;
3592 
3593 		case -EPROTO:
3594 		case -ETIME:
3595 		case -EILSEQ:
3596 			netif_stop_queue(dev->net);
3597 			netif_dbg(dev, tx_err, dev->net,
3598 				  "tx err queue stopped %d\n",
3599 				  entry->urb->status);
3600 			break;
3601 		default:
3602 			netif_dbg(dev, tx_err, dev->net,
3603 				  "unknown tx err %d\n",
3604 				  entry->urb->status);
3605 			break;
3606 		}
3607 	}
3608 
3609 	usb_autopm_put_interface_async(dev->intf);
3610 
3611 	skb_unlink(skb, &dev->txq);
3612 
3613 	lan78xx_release_tx_buf(dev, skb);
3614 
3615 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3616 	 */
3617 	if (skb_queue_empty(&dev->txq) &&
3618 	    !skb_queue_empty(&dev->txq_pend))
3619 		napi_schedule(&dev->napi);
3620 }
3621 
3622 static void lan78xx_queue_skb(struct sk_buff_head *list,
3623 			      struct sk_buff *newsk, enum skb_state state)
3624 {
3625 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3626 
3627 	__skb_queue_tail(list, newsk);
3628 	entry->state = state;
3629 }
3630 
3631 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3632 {
3633 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3634 }
3635 
3636 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3637 {
3638 	return dev->tx_pend_data_len;
3639 }
3640 
3641 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3642 				    struct sk_buff *skb,
3643 				    unsigned int *tx_pend_data_len)
3644 {
3645 	unsigned long flags;
3646 
3647 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3648 
3649 	__skb_queue_tail(&dev->txq_pend, skb);
3650 
3651 	dev->tx_pend_data_len += skb->len;
3652 	*tx_pend_data_len = dev->tx_pend_data_len;
3653 
3654 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3655 }
3656 
3657 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3658 					 struct sk_buff *skb,
3659 					 unsigned int *tx_pend_data_len)
3660 {
3661 	unsigned long flags;
3662 
3663 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3664 
3665 	__skb_queue_head(&dev->txq_pend, skb);
3666 
3667 	dev->tx_pend_data_len += skb->len;
3668 	*tx_pend_data_len = dev->tx_pend_data_len;
3669 
3670 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3671 }
3672 
3673 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3674 				    struct sk_buff **skb,
3675 				    unsigned int *tx_pend_data_len)
3676 {
3677 	unsigned long flags;
3678 
3679 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3680 
3681 	*skb = __skb_dequeue(&dev->txq_pend);
3682 	if (*skb)
3683 		dev->tx_pend_data_len -= (*skb)->len;
3684 	*tx_pend_data_len = dev->tx_pend_data_len;
3685 
3686 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3687 }
3688 
3689 static netdev_tx_t
3690 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3691 {
3692 	struct lan78xx_net *dev = netdev_priv(net);
3693 	unsigned int tx_pend_data_len;
3694 
3695 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3696 		schedule_delayed_work(&dev->wq, 0);
3697 
3698 	skb_tx_timestamp(skb);
3699 
3700 	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3701 
3702 	/* Set up a Tx URB if none is in progress */
3703 
3704 	if (skb_queue_empty(&dev->txq))
3705 		napi_schedule(&dev->napi);
3706 
3707 	/* Stop stack Tx queue if we have enough data to fill
3708 	 * all the free Tx URBs.
3709 	 */
3710 	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3711 		netif_stop_queue(net);
3712 
3713 		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3714 			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3715 
3716 		/* Kick off transmission of pending data */
3717 
3718 		if (!skb_queue_empty(&dev->txq_free))
3719 			napi_schedule(&dev->napi);
3720 	}
3721 
3722 	return NETDEV_TX_OK;
3723 }
3724 
3725 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3726 {
3727 	struct lan78xx_priv *pdata = NULL;
3728 	int ret;
3729 	int i;
3730 
3731 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3732 
3733 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3734 	if (!pdata) {
3735 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3736 		return -ENOMEM;
3737 	}
3738 
3739 	pdata->dev = dev;
3740 
3741 	spin_lock_init(&pdata->rfe_ctl_lock);
3742 	mutex_init(&pdata->dataport_mutex);
3743 
3744 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3745 
3746 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3747 		pdata->vlan_table[i] = 0;
3748 
3749 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3750 
3751 	dev->net->features = 0;
3752 
3753 	if (DEFAULT_TX_CSUM_ENABLE)
3754 		dev->net->features |= NETIF_F_HW_CSUM;
3755 
3756 	if (DEFAULT_RX_CSUM_ENABLE)
3757 		dev->net->features |= NETIF_F_RXCSUM;
3758 
3759 	if (DEFAULT_TSO_CSUM_ENABLE)
3760 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3761 
3762 	if (DEFAULT_VLAN_RX_OFFLOAD)
3763 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3764 
3765 	if (DEFAULT_VLAN_FILTER_ENABLE)
3766 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3767 
3768 	dev->net->hw_features = dev->net->features;
3769 
3770 	ret = lan78xx_setup_irq_domain(dev);
3771 	if (ret < 0) {
3772 		netdev_warn(dev->net,
3773 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3774 		goto out1;
3775 	}
3776 
3777 	/* Init all registers */
3778 	ret = lan78xx_reset(dev);
3779 	if (ret) {
3780 		netdev_warn(dev->net, "Registers INIT FAILED....");
3781 		goto out2;
3782 	}
3783 
3784 	ret = lan78xx_mdio_init(dev);
3785 	if (ret) {
3786 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3787 		goto out2;
3788 	}
3789 
3790 	dev->net->flags |= IFF_MULTICAST;
3791 
3792 	pdata->wol = WAKE_MAGIC;
3793 
3794 	return ret;
3795 
3796 out2:
3797 	lan78xx_remove_irq_domain(dev);
3798 
3799 out1:
3800 	netdev_warn(dev->net, "Bind routine FAILED");
3801 	cancel_work_sync(&pdata->set_multicast);
3802 	cancel_work_sync(&pdata->set_vlan);
3803 	kfree(pdata);
3804 	return ret;
3805 }
3806 
3807 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3808 {
3809 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3810 
3811 	lan78xx_remove_irq_domain(dev);
3812 
3813 	lan78xx_remove_mdio(dev);
3814 
3815 	if (pdata) {
3816 		cancel_work_sync(&pdata->set_multicast);
3817 		cancel_work_sync(&pdata->set_vlan);
3818 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3819 		kfree(pdata);
3820 		pdata = NULL;
3821 		dev->data[0] = 0;
3822 	}
3823 }
3824 
3825 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3826 				    struct sk_buff *skb,
3827 				    u32 rx_cmd_a, u32 rx_cmd_b)
3828 {
3829 	/* HW Checksum offload appears to be flawed if used when not stripping
3830 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3831 	 */
3832 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3833 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3834 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3835 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3836 		skb->ip_summed = CHECKSUM_NONE;
3837 	} else {
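		/* The hardware checksums the whole frame and reports the
		 * 16-bit result in rx_cmd_b; hand it to the stack as
		 * CHECKSUM_COMPLETE.
		 */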
3838 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3839 		skb->ip_summed = CHECKSUM_COMPLETE;
3840 	}
3841 }
3842 
3843 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3844 				    struct sk_buff *skb,
3845 				    u32 rx_cmd_a, u32 rx_cmd_b)
3846 {
3847 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3848 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3849 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3850 				       (rx_cmd_b & 0xffff));
3851 }
3852 
3853 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3854 {
3855 	dev->net->stats.rx_packets++;
3856 	dev->net->stats.rx_bytes += skb->len;
3857 
3858 	skb->protocol = eth_type_trans(skb, dev->net);
3859 
3860 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3861 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3862 	memset(skb->cb, 0, sizeof(struct skb_data));
3863 
3864 	if (skb_defer_rx_timestamp(skb))
3865 		return;
3866 
3867 	napi_gro_receive(&dev->napi, skb);
3868 }
3869 
3870 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3871 		      int budget, int *work_done)
3872 {
3873 	if (skb->len < RX_SKB_MIN_LEN)
3874 		return 0;
3875 
3876 	/* Extract frames from the URB buffer and pass each one to
3877 	 * the stack in a new NAPI SKB.
3878 	 */
3879 	while (skb->len > 0) {
3880 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3881 		u16 rx_cmd_c;
3882 		unsigned char *packet;
3883 
3884 		rx_cmd_a = get_unaligned_le32(skb->data);
3885 		skb_pull(skb, sizeof(rx_cmd_a));
3886 
3887 		rx_cmd_b = get_unaligned_le32(skb->data);
3888 		skb_pull(skb, sizeof(rx_cmd_b));
3889 
3890 		rx_cmd_c = get_unaligned_le16(skb->data);
3891 		skb_pull(skb, sizeof(rx_cmd_c));
3892 
3893 		packet = skb->data;
3894 
3895 		/* get the packet length */
3896 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3897 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3898 
3899 		if (unlikely(size > skb->len)) {
3900 			netif_dbg(dev, rx_err, dev->net,
3901 				  "size err rx_cmd_a=0x%08x\n",
3902 				  rx_cmd_a);
3903 			return 0;
3904 		}
3905 
3906 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3907 			netif_dbg(dev, rx_err, dev->net,
3908 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3909 		} else {
3910 			u32 frame_len;
3911 			struct sk_buff *skb2;
3912 
3913 			if (unlikely(size < ETH_FCS_LEN)) {
3914 				netif_dbg(dev, rx_err, dev->net,
3915 					  "size err rx_cmd_a=0x%08x\n",
3916 					  rx_cmd_a);
3917 				return 0;
3918 			}
3919 
3920 			frame_len = size - ETH_FCS_LEN;
3921 
3922 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3923 			if (!skb2)
3924 				return 0;
3925 
3926 			memcpy(skb2->data, packet, frame_len);
3927 
3928 			skb_put(skb2, frame_len);
3929 
3930 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3931 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3932 
3933 			/* Processing of the URB buffer must complete once
3934 			 * it has started. If the NAPI work budget is exhausted
3935 			 * while frames remain, they are added to the overflow
3936 			 * queue for delivery in the next NAPI polling cycle.
3937 			 */
3938 			if (*work_done < budget) {
3939 				lan78xx_skb_return(dev, skb2);
3940 				++(*work_done);
3941 			} else {
3942 				skb_queue_tail(&dev->rxq_overflow, skb2);
3943 			}
3944 		}
3945 
3946 		skb_pull(skb, size);
3947 
3948 		/* skip padding bytes before the next frame starts */
3949 		if (skb->len)
3950 			skb_pull(skb, align_count);
3951 	}
3952 
3953 	return 1;
3954 }
3955 
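/* URB buffer framing parsed above (illustrative sketch):
 *
 *	+----------+----------+----------+------------------+---------+
 *	| rx_cmd_a | rx_cmd_b | rx_cmd_c | frame incl. FCS  | padding |
 *	| 4 bytes  | 4 bytes  | 2 bytes  | size bytes       | 0-3 B   |
 *	+----------+----------+----------+------------------+---------+
 *
 * The 10-byte header leaves each frame RXW_PADDING (2) bytes past a 4-byte
 * boundary, so align_count rounds (size + RXW_PADDING) up to the next
 * multiple of four. For example, size = 60 gives
 * align_count = (4 - (62 % 4)) % 4 = 2 padding bytes before the next
 * header.
 */
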
3956 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3957 			      int budget, int *work_done)
3958 {
3959 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3960 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3961 		dev->net->stats.rx_errors++;
3962 	}
3963 }
3964 
3965 static void rx_complete(struct urb *urb)
3966 {
3967 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3968 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3969 	struct lan78xx_net *dev = entry->dev;
3970 	int urb_status = urb->status;
3971 	enum skb_state state;
3972 
3973 	netif_dbg(dev, rx_status, dev->net,
3974 		  "rx done: status %d\n", urb->status);
3975 
3976 	skb_put(skb, urb->actual_length);
3977 	state = rx_done;
3978 
3979 	if (urb != entry->urb)
3980 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch\n");
3981 
3982 	switch (urb_status) {
3983 	case 0:
3984 		if (skb->len < RX_SKB_MIN_LEN) {
3985 			state = rx_cleanup;
3986 			dev->net->stats.rx_errors++;
3987 			dev->net->stats.rx_length_errors++;
3988 			netif_dbg(dev, rx_err, dev->net,
3989 				  "rx length %d\n", skb->len);
3990 		}
3991 		usb_mark_last_busy(dev->udev);
3992 		break;
3993 	case -EPIPE:
3994 		dev->net->stats.rx_errors++;
3995 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3996 		fallthrough;
3997 	case -ECONNRESET:				/* async unlink */
3998 	case -ESHUTDOWN:				/* hardware gone */
3999 		netif_dbg(dev, ifdown, dev->net,
4000 			  "rx shutdown, code %d\n", urb_status);
4001 		state = rx_cleanup;
4002 		break;
4003 	case -EPROTO:
4004 	case -ETIME:
4005 	case -EILSEQ:
4006 		dev->net->stats.rx_errors++;
4007 		state = rx_cleanup;
4008 		break;
4009 
4010 	/* data overrun ... flush fifo? */
4011 	case -EOVERFLOW:
4012 		dev->net->stats.rx_over_errors++;
4013 		fallthrough;
4014 
4015 	default:
4016 		state = rx_cleanup;
4017 		dev->net->stats.rx_errors++;
4018 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
4019 		break;
4020 	}
4021 
4022 	state = defer_bh(dev, skb, &dev->rxq, state);
4023 }
4024 
4025 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
4026 {
4027 	struct skb_data	*entry = (struct skb_data *)skb->cb;
4028 	size_t size = dev->rx_urb_size;
4029 	struct urb *urb = entry->urb;
4030 	unsigned long lockflags;
4031 	int ret = 0;
4032 
4033 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
4034 			  skb->data, size, rx_complete, skb);
4035 
4036 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
4037 
4038 	if (netif_device_present(dev->net) &&
4039 	    netif_running(dev->net) &&
4040 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
4041 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4042 		ret = usb_submit_urb(urb, flags);
4043 		switch (ret) {
4044 		case 0:
4045 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
4046 			break;
4047 		case -EPIPE:
4048 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
4049 			break;
4050 		case -ENODEV:
4051 		case -ENOENT:
4052 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
4053 			netif_device_detach(dev->net);
4054 			break;
4055 		case -EHOSTUNREACH:
4056 			ret = -ENOLINK;
4057 			napi_schedule(&dev->napi);
4058 			break;
4059 		default:
4060 			netif_dbg(dev, rx_err, dev->net,
4061 				  "rx submit, %d\n", ret);
4062 			napi_schedule(&dev->napi);
4063 			break;
4064 		}
4065 	} else {
4066 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
4067 		ret = -ENOLINK;
4068 	}
4069 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
4070 
4071 	if (ret)
4072 		lan78xx_release_rx_buf(dev, skb);
4073 
4074 	return ret;
4075 }
4076 
4077 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4078 {
4079 	struct sk_buff *rx_buf;
4080 
4081 	/* Ensure the maximum number of Rx URBs is submitted
4082 	 */
4083 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4084 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4085 			break;
4086 	}
4087 }
4088 
4089 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
4090 				    struct sk_buff *rx_buf)
4091 {
4092 	/* reset SKB data pointers */
4093 
4094 	rx_buf->data = rx_buf->head;
4095 	skb_reset_tail_pointer(rx_buf);
4096 	rx_buf->len = 0;
4097 	rx_buf->data_len = 0;
4098 
4099 	rx_submit(dev, rx_buf, GFP_ATOMIC);
4100 }
4101 
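/* Rx buffers are recycled rather than reallocated: resetting data, tail
 * and len above rewinds the SKB to its original empty state so the same
 * preallocated buffer can be handed straight back to rx_submit().
 */
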
4102 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
4103 {
4104 	u32 tx_cmd_a;
4105 	u32 tx_cmd_b;
4106 
4107 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
4108 
4109 	if (skb->ip_summed == CHECKSUM_PARTIAL)
4110 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
4111 
4112 	tx_cmd_b = 0;
4113 	if (skb_is_gso(skb)) {
4114 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
4115 
4116 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
4117 
4118 		tx_cmd_a |= TX_CMD_A_LSO_;
4119 	}
4120 
4121 	if (skb_vlan_tag_present(skb)) {
4122 		tx_cmd_a |= TX_CMD_A_IVTG_;
4123 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
4124 	}
4125 
4126 	put_unaligned_le32(tx_cmd_a, buffer);
4127 	put_unaligned_le32(tx_cmd_b, buffer + 4);
4128 }
4129 
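/* Worked example (illustrative): a 1514-byte frame with CHECKSUM_PARTIAL
 * set and VLAN tag 100 present would be described as
 *
 *	tx_cmd_a = 1514 | TX_CMD_A_FCS_ | TX_CMD_A_IPE_ | TX_CMD_A_TPE_ |
 *		   TX_CMD_A_IVTG_;
 *	tx_cmd_b = 100;		(VLAN TCI in the TX_CMD_B_VTAG_MASK_ bits)
 *
 * Both words are stored little-endian immediately ahead of the frame data.
 */
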
4130 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
4131 					    struct sk_buff *tx_buf)
4132 {
4133 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
4134 	int remain = dev->tx_urb_size;
4135 	u8 *tx_data = tx_buf->data;
4136 	u32 urb_len = 0;
4137 
4138 	entry->num_of_packet = 0;
4139 	entry->length = 0;
4140 
4141 	/* Work through the pending SKBs and copy the data of each SKB into
4142 	 * the URB buffer if there is room for all the SKB data.
4143 	 *
4144 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
4145 	 */
4146 	while (remain >= TX_SKB_MIN_LEN) {
4147 		unsigned int pending_bytes;
4148 		unsigned int align_bytes;
4149 		struct sk_buff *skb;
4150 		unsigned int len;
4151 
4152 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
4153 
4154 		if (!skb)
4155 			break;
4156 
4157 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
4158 			      TX_ALIGNMENT;
4159 		len = align_bytes + TX_CMD_LEN + skb->len;
4160 		if (len > remain) {
4161 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
4162 			break;
4163 		}
4164 
4165 		tx_data += align_bytes;
4166 
4167 		lan78xx_fill_tx_cmd_words(skb, tx_data);
4168 		tx_data += TX_CMD_LEN;
4169 
4170 		len = skb->len;
4171 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
4172 			struct net_device_stats *stats = &dev->net->stats;
4173 
4174 			stats->tx_dropped++;
4175 			dev_kfree_skb_any(skb);
4176 			tx_data -= TX_CMD_LEN;
4177 			continue;
4178 		}
4179 
4180 		tx_data += len;
4181 		entry->length += len;
4182 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
4183 
4184 		dev_kfree_skb_any(skb);
4185 
4186 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
4187 
4188 		remain = dev->tx_urb_size - urb_len;
4189 	}
4190 
4191 	skb_put(tx_buf, urb_len);
4192 
4193 	return entry;
4194 }
4195 
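/* Resulting URB buffer layout (illustrative sketch):
 *
 *	| cmd A/B | skb 0 data | pad | cmd A/B | skb 1 data | pad | ...
 *
 * Each command-word pair starts on a TX_ALIGNMENT (4-byte) boundary;
 * padding is inserted only between packets and is never counted in
 * entry->length, which tracks payload bytes only.
 */
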
4196 static void lan78xx_tx_bh(struct lan78xx_net *dev)
4197 {
4198 	int ret;
4199 
4200 	/* Start the stack Tx queue if it was stopped
4201 	 */
4202 	netif_tx_lock(dev->net);
4203 	if (netif_queue_stopped(dev->net)) {
4204 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
4205 			netif_wake_queue(dev->net);
4206 	}
4207 	netif_tx_unlock(dev->net);
4208 
4209 	/* Go through the Tx pending queue and set up URBs to transfer
4210 	 * the data to the device. Stop if no more pending data or URBs,
4211 	 * or if an error occurs when a URB is submitted.
4212 	 */
4213 	do {
4214 		struct skb_data *entry;
4215 		struct sk_buff *tx_buf;
4216 		unsigned long flags;
4217 
4218 		if (skb_queue_empty(&dev->txq_pend))
4219 			break;
4220 
4221 		tx_buf = lan78xx_get_tx_buf(dev);
4222 		if (!tx_buf)
4223 			break;
4224 
4225 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
4226 
4227 		spin_lock_irqsave(&dev->txq.lock, flags);
4228 		ret = usb_autopm_get_interface_async(dev->intf);
4229 		if (ret < 0) {
4230 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4231 			goto out;
4232 		}
4233 
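		/* The autopm reference taken above is held for the lifetime
		 * of the URB: on successful submission it is released by the
		 * completion handler, so only the failure paths below drop
		 * it directly (or detach the device when it is gone).
		 */
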
4234 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
4235 				  tx_buf->data, tx_buf->len, tx_complete,
4236 				  tx_buf);
4237 
4238 		if (tx_buf->len % dev->maxpacket == 0) {
4239 			/* send a zero-length packet (URB_ZERO_PACKET) */
4240 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
4241 		}
4242 
4243 #ifdef CONFIG_PM
4244 		/* If the device is asleep, stop outgoing packet processing */
4245 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4246 			usb_anchor_urb(entry->urb, &dev->deferred);
4247 			netif_stop_queue(dev->net);
4248 			spin_unlock_irqrestore(&dev->txq.lock, flags);
4249 			netdev_dbg(dev->net,
4250 				   "Delaying transmission for resumption\n");
4251 			return;
4252 		}
4253 #endif
4254 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
4255 		switch (ret) {
4256 		case 0:
4257 			netif_trans_update(dev->net);
4258 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
4259 			break;
4260 		case -EPIPE:
4261 			netif_stop_queue(dev->net);
4262 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4263 			usb_autopm_put_interface_async(dev->intf);
4264 			break;
4265 		case -ENODEV:
4266 		case -ENOENT:
4267 			netif_dbg(dev, tx_err, dev->net,
4268 				  "tx submit urb err %d (disconnected?)\n", ret);
4269 			netif_device_detach(dev->net);
4270 			break;
4271 		default:
4272 			usb_autopm_put_interface_async(dev->intf);
4273 			netif_dbg(dev, tx_err, dev->net,
4274 				  "tx submit urb err %d\n", ret);
4275 			break;
4276 		}
4277 
4278 		spin_unlock_irqrestore(&dev->txq.lock, flags);
4279 
4280 		if (ret) {
4281 			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
4282 out:
4283 			dev->net->stats.tx_dropped += entry->num_of_packet;
4284 			lan78xx_release_tx_buf(dev, tx_buf);
4285 		}
4286 	} while (ret == 0);
4287 }
4288 
4289 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
4290 {
4291 	struct sk_buff_head done;
4292 	struct sk_buff *rx_buf;
4293 	struct skb_data *entry;
4294 	unsigned long flags;
4295 	int work_done = 0;
4296 
4297 	/* Pass frames received in the last NAPI cycle before
4298 	 * working on newly completed URBs.
4299 	 */
4300 	while (!skb_queue_empty(&dev->rxq_overflow)) {
4301 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
4302 		++work_done;
4303 	}
4304 
4305 	/* Take a snapshot of the done queue and move items to a
4306 	 * temporary queue. Rx URB completions will continue to add
4307 	 * to the done queue.
4308 	 */
4309 	__skb_queue_head_init(&done);
4310 
4311 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4312 	skb_queue_splice_init(&dev->rxq_done, &done);
4313 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4314 
4315 	/* Extract receive frames from completed URBs and
4316 	 * pass them to the stack. Re-submit each completed URB.
4317 	 */
4318 	while ((work_done < budget) &&
4319 	       (rx_buf = __skb_dequeue(&done))) {
4320 		entry = (struct skb_data *)(rx_buf->cb);
4321 		switch (entry->state) {
4322 		case rx_done:
4323 			rx_process(dev, rx_buf, budget, &work_done);
4324 			break;
4325 		case rx_cleanup:
4326 			break;
4327 		default:
4328 			netdev_dbg(dev->net, "rx buf state %d\n",
4329 				   entry->state);
4330 			break;
4331 		}
4332 
4333 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4334 	}
4335 
4336 	/* If budget was consumed before processing all the URBs put them
4337 	 * back on the front of the done queue. They will be first to be
4338 	 * processed in the next NAPI cycle.
4339 	 */
4340 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4341 	skb_queue_splice(&done, &dev->rxq_done);
4342 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4343 
4344 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4345 		/* reset update timer delta */
4346 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4347 			dev->delta = 1;
4348 			mod_timer(&dev->stat_monitor,
4349 				  jiffies + STAT_UPDATE_TIMER);
4350 		}
4351 
4352 		/* Submit all free Rx URBs */
4353 
4354 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4355 			lan78xx_rx_urb_submit_all(dev);
4356 
4357 		/* Submit new Tx URBs */
4358 
4359 		lan78xx_tx_bh(dev);
4360 	}
4361 
4362 	return work_done;
4363 }
4364 
4365 static int lan78xx_poll(struct napi_struct *napi, int budget)
4366 {
4367 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4368 	int result = budget;
4369 	int work_done;
4370 
4371 	/* Don't do any work if the device is suspended */
4372 
4373 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4374 		napi_complete_done(napi, 0);
4375 		return 0;
4376 	}
4377 
4378 	/* Process completed URBs and submit new URBs */
4379 
4380 	work_done = lan78xx_bh(dev, budget);
4381 
4382 	if (work_done < budget) {
4383 		napi_complete_done(napi, work_done);
4384 
4385 		/* Start a new polling cycle if data was received or
4386 		 * data is waiting to be transmitted.
4387 		 */
4388 		if (!skb_queue_empty(&dev->rxq_done)) {
4389 			napi_schedule(napi);
4390 		} else if (netif_carrier_ok(dev->net)) {
4391 			if (skb_queue_empty(&dev->txq) &&
4392 			    !skb_queue_empty(&dev->txq_pend)) {
4393 				napi_schedule(napi);
4394 			} else {
4395 				netif_tx_lock(dev->net);
4396 				if (netif_queue_stopped(dev->net)) {
4397 					netif_wake_queue(dev->net);
4398 					napi_schedule(napi);
4399 				}
4400 				netif_tx_unlock(dev->net);
4401 			}
4402 		}
4403 		result = work_done;
4404 	}
4405 
4406 	return result;
4407 }
4408 
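/* NAPI contract recap (descriptive note): completing with
 * napi_complete_done() and a return value below the budget ends the poll
 * cycle, after which the driver must re-arm itself; returning the full
 * budget keeps the poller scheduled. Because URB completions, not device
 * interrupts, drive this driver, lan78xx_poll() re-schedules explicitly
 * above whenever completed Rx URBs or pending Tx data remain.
 */
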
4409 static void lan78xx_delayedwork(struct work_struct *work)
4410 {
4411 	int status;
4412 	struct lan78xx_net *dev;
4413 
4414 	dev = container_of(work, struct lan78xx_net, wq.work);
4415 
4416 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4417 		return;
4418 
4419 	if (usb_autopm_get_interface(dev->intf) < 0)
4420 		return;
4421 
4422 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4423 		unlink_urbs(dev, &dev->txq);
4424 
4425 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4426 		if (status < 0 &&
4427 		    status != -EPIPE &&
4428 		    status != -ESHUTDOWN) {
4429 			if (netif_msg_tx_err(dev))
4430 				netdev_err(dev->net,
4431 					   "can't clear tx halt, status %d\n",
4432 					   status);
4433 		} else {
4434 			clear_bit(EVENT_TX_HALT, &dev->flags);
4435 			if (status != -ESHUTDOWN)
4436 				netif_wake_queue(dev->net);
4437 		}
4438 	}
4439 
4440 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4441 		unlink_urbs(dev, &dev->rxq);
4442 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4443 		if (status < 0 &&
4444 		    status != -EPIPE &&
4445 		    status != -ESHUTDOWN) {
4446 			if (netif_msg_rx_err(dev))
4447 				netdev_err(dev->net,
4448 					   "can't clear rx halt, status %d\n",
4449 					   status);
4450 		} else {
4451 			clear_bit(EVENT_RX_HALT, &dev->flags);
4452 			napi_schedule(&dev->napi);
4453 		}
4454 	}
4455 
4456 	if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) {
4457 		int ret = 0;
4458 
4459 		clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
4460 		ret = lan78xx_phy_int_ack(dev);
4461 		if (ret)
4462 			netdev_info(dev->net, "PHY INT ack failed (%pe)\n",
4463 				    ERR_PTR(ret));
4464 	}
4465 
4466 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4467 		lan78xx_update_stats(dev);
4468 
4469 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4470 
4471 		mod_timer(&dev->stat_monitor,
4472 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4473 
4474 		dev->delta = min((dev->delta * 2), 50);
4475 	}
4476 
4477 	usb_autopm_put_interface(dev->intf);
4478 }
4479 
4480 static void intr_complete(struct urb *urb)
4481 {
4482 	struct lan78xx_net *dev = urb->context;
4483 	int status = urb->status;
4484 
4485 	switch (status) {
4486 	/* success */
4487 	case 0:
4488 		lan78xx_status(dev, urb);
4489 		break;
4490 
4491 	/* software-driven interface shutdown */
4492 	case -ENOENT:			/* urb killed */
4493 	case -ENODEV:			/* hardware gone */
4494 	case -ESHUTDOWN:		/* hardware gone */
4495 		netif_dbg(dev, ifdown, dev->net,
4496 			  "intr shutdown, code %d\n", status);
4497 		return;
4498 
4499 	/* NOTE:  not throttling like RX/TX, since this endpoint
4500 	 * already polls infrequently
4501 	 */
4502 	default:
4503 		netdev_dbg(dev->net, "intr status %d\n", status);
4504 		break;
4505 	}
4506 
4507 	if (!netif_device_present(dev->net) ||
4508 	    !netif_running(dev->net)) {
4509 		netdev_warn(dev->net, "not submitting new status URB\n");
4510 		return;
4511 	}
4512 
4513 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4514 	status = usb_submit_urb(urb, GFP_ATOMIC);
4515 
4516 	switch (status) {
4517 	case  0:
4518 		break;
4519 	case -ENODEV:
4520 	case -ENOENT:
4521 		netif_dbg(dev, timer, dev->net,
4522 			  "intr resubmit %d (disconnect?)\n", status);
4523 		netif_device_detach(dev->net);
4524 		break;
4525 	default:
4526 		netif_err(dev, timer, dev->net,
4527 			  "intr resubmit --> %d\n", status);
4528 		break;
4529 	}
4530 }
4531 
4532 static void lan78xx_disconnect(struct usb_interface *intf)
4533 {
4534 	struct lan78xx_net *dev;
4535 	struct usb_device *udev;
4536 	struct net_device *net;
4537 
4538 	dev = usb_get_intfdata(intf);
4539 	usb_set_intfdata(intf, NULL);
4540 	if (!dev)
4541 		return;
4542 
4543 	udev = interface_to_usbdev(intf);
4544 	net = dev->net;
4545 
4546 	rtnl_lock();
4547 	phylink_stop(dev->phylink);
4548 	phylink_disconnect_phy(dev->phylink);
4549 	rtnl_unlock();
4550 
4551 	netif_napi_del(&dev->napi);
4552 
4553 	unregister_netdev(net);
4554 
4555 	timer_shutdown_sync(&dev->stat_monitor);
4556 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4557 	cancel_delayed_work_sync(&dev->wq);
4558 
4559 	phylink_destroy(dev->phylink);
4560 
4561 	usb_scuttle_anchored_urbs(&dev->deferred);
4562 
4563 	lan78xx_unbind(dev, intf);
4564 
4565 	lan78xx_free_tx_resources(dev);
4566 	lan78xx_free_rx_resources(dev);
4567 
4568 	usb_kill_urb(dev->urb_intr);
4569 	usb_free_urb(dev->urb_intr);
4570 
4571 	free_netdev(net);
4572 	usb_put_dev(udev);
4573 }
4574 
4575 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4576 {
4577 	struct lan78xx_net *dev = netdev_priv(net);
4578 
4579 	unlink_urbs(dev, &dev->txq);
4580 	napi_schedule(&dev->napi);
4581 }
4582 
4583 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4584 						struct net_device *netdev,
4585 						netdev_features_t features)
4586 {
4587 	struct lan78xx_net *dev = netdev_priv(netdev);
4588 
4589 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4590 		features &= ~NETIF_F_GSO_MASK;
4591 
4592 	features = vlan_features_check(skb, features);
4593 	features = vxlan_features_check(skb, features);
4594 
4595 	return features;
4596 }
4597 
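/* Example effect (descriptive note): clearing NETIF_F_GSO_MASK for an
 * oversized skb makes the core segment it in software before it reaches
 * lan78xx_start_xmit(), so the device never sees a TSO request larger
 * than LAN78XX_TSO_SIZE(dev).
 */
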
4598 static const struct net_device_ops lan78xx_netdev_ops = {
4599 	.ndo_open		= lan78xx_open,
4600 	.ndo_stop		= lan78xx_stop,
4601 	.ndo_start_xmit		= lan78xx_start_xmit,
4602 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4603 	.ndo_change_mtu		= lan78xx_change_mtu,
4604 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4605 	.ndo_validate_addr	= eth_validate_addr,
4606 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4607 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4608 	.ndo_set_features	= lan78xx_set_features,
4609 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4610 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4611 	.ndo_features_check	= lan78xx_features_check,
4612 };
4613 
4614 static void lan78xx_stat_monitor(struct timer_list *t)
4615 {
4616 	struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);
4617 
4618 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4619 }
4620 
4621 static int lan78xx_probe(struct usb_interface *intf,
4622 			 const struct usb_device_id *id)
4623 {
4624 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4625 	struct lan78xx_net *dev;
4626 	struct net_device *netdev;
4627 	struct usb_device *udev;
4628 	int ret;
4629 	unsigned int maxp;
4630 	unsigned int period;
4631 	u8 *buf = NULL;
4632 
4633 	udev = interface_to_usbdev(intf);
4634 	udev = usb_get_dev(udev);
4635 
4636 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4637 	if (!netdev) {
4638 		dev_err(&intf->dev, "Error: OOM\n");
4639 		ret = -ENOMEM;
4640 		goto out1;
4641 	}
4642 
4643 	SET_NETDEV_DEV(netdev, &intf->dev);
4644 
4645 	dev = netdev_priv(netdev);
4646 	dev->udev = udev;
4647 	dev->intf = intf;
4648 	dev->net = netdev;
4649 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4650 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4651 
4652 	skb_queue_head_init(&dev->rxq);
4653 	skb_queue_head_init(&dev->txq);
4654 	skb_queue_head_init(&dev->rxq_done);
4655 	skb_queue_head_init(&dev->txq_pend);
4656 	skb_queue_head_init(&dev->rxq_overflow);
4657 	mutex_init(&dev->mdiobus_mutex);
4658 	mutex_init(&dev->dev_mutex);
4659 
4660 	ret = lan78xx_urb_config_init(dev);
4661 	if (ret < 0)
4662 		goto out2;
4663 
4664 	ret = lan78xx_alloc_tx_resources(dev);
4665 	if (ret < 0)
4666 		goto out2;
4667 
4668 	ret = lan78xx_alloc_rx_resources(dev);
4669 	if (ret < 0)
4670 		goto out3;
4671 
4672 	/* MTU range: 68 - 9000 */
4673 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4674 
4675 	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4676 
4677 	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4678 
4679 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4680 	init_usb_anchor(&dev->deferred);
4681 
4682 	netdev->netdev_ops = &lan78xx_netdev_ops;
4683 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4684 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4685 
4686 	dev->delta = 1;
4687 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4688 
4689 	mutex_init(&dev->stats.access_lock);
4690 
4691 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4692 		ret = -ENODEV;
4693 		goto out4;
4694 	}
4695 
4696 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4697 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4698 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4699 		ret = -ENODEV;
4700 		goto out4;
4701 	}
4702 
4703 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4704 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4705 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4706 		ret = -ENODEV;
4707 		goto out4;
4708 	}
4709 
4710 	ep_intr = &intf->cur_altsetting->endpoint[2];
4711 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4712 		ret = -ENODEV;
4713 		goto out4;
4714 	}
4715 
4716 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4717 					usb_endpoint_num(&ep_intr->desc));
4718 
4719 	ret = lan78xx_bind(dev, intf);
4720 	if (ret < 0)
4721 		goto out4;
4722 
4723 	period = ep_intr->desc.bInterval;
4724 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4725 
4726 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4727 	if (!dev->urb_intr) {
4728 		ret = -ENOMEM;
4729 		goto out5;
4730 	}
4731 
4732 	buf = kmalloc(maxp, GFP_KERNEL);
4733 	if (!buf) {
4734 		ret = -ENOMEM;
4735 		goto free_urbs;
4736 	}
4737 
4738 	usb_fill_int_urb(dev->urb_intr, dev->udev,
4739 			 dev->pipe_intr, buf, maxp,
4740 			 intr_complete, dev, period);
4741 	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4742 
4743 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4744 
4745 	/* Reject broken descriptors. */
4746 	if (dev->maxpacket == 0) {
4747 		ret = -ENODEV;
4748 		goto free_urbs;
4749 	}
4750 
4751 	/* driver requires remote-wakeup capability during autosuspend. */
4752 	intf->needs_remote_wakeup = 1;
4753 
4754 	ret = lan78xx_phy_init(dev);
4755 	if (ret < 0)
4756 		goto free_urbs;
4757 
4758 	ret = register_netdev(netdev);
4759 	if (ret != 0) {
4760 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4761 		goto phy_uninit;
4762 	}
4763 
4764 	usb_set_intfdata(intf, dev);
4765 
4766 	ret = device_set_wakeup_enable(&udev->dev, true);
4767 
4768 	/* The default autosuspend delay of 2 seconds adds more overhead than
4769 	 * it saves; use 10 seconds as the default instead.
4770 	 */
4771 	pm_runtime_set_autosuspend_delay(&udev->dev,
4772 					 DEFAULT_AUTOSUSPEND_DELAY);
4773 
4774 	return 0;
4775 
4776 phy_uninit:
4777 	lan78xx_phy_uninit(dev);
4778 free_urbs:
4779 	usb_free_urb(dev->urb_intr);
4780 out5:
4781 	lan78xx_unbind(dev, intf);
4782 out4:
4783 	netif_napi_del(&dev->napi);
4784 	lan78xx_free_rx_resources(dev);
4785 out3:
4786 	lan78xx_free_tx_resources(dev);
4787 out2:
4788 	free_netdev(netdev);
4789 out1:
4790 	usb_put_dev(udev);
4791 
4792 	return ret;
4793 }
4794 
4795 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4796 {
4797 	const u16 crc16poly = 0x8005;
4798 	int i;
4799 	u16 bit, crc, msb;
4800 	u8 data;
4801 
4802 	crc = 0xFFFF;
4803 	for (i = 0; i < len; i++) {
4804 		data = *buf++;
4805 		for (bit = 0; bit < 8; bit++) {
4806 			msb = crc >> 15;
4807 			crc <<= 1;
4808 
4809 			if (msb ^ (u16)(data & 1)) {
4810 				crc ^= crc16poly;
4811 				crc |= (u16)0x0001U;
4812 			}
4813 			data >>= 1;
4814 		}
4815 	}
4816 
4817 	return crc;
4818 }
4819 
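/* Usage sketch (illustrative): the wakeup filters below match frames by
 * CRC, e.g. for the ARP EtherType:
 *
 *	static const u8 arp_type[2] = { 0x08, 0x06 };
 *	u16 crc = lan78xx_wakeframe_crc16(arp_type, 2);
 *
 * The (crc & WUF_CFGX_CRC16_MASK_) value is programmed into a WUF_CFG
 * register; the hardware computes the same CRC over the frame bytes
 * selected by the WUF_MASKx registers and wakes the host on a match.
 */
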
4820 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4821 {
4822 	u32 buf;
4823 	int ret;
4824 
4825 	ret = lan78xx_stop_tx_path(dev);
4826 	if (ret < 0)
4827 		return ret;
4828 
4829 	ret = lan78xx_stop_rx_path(dev);
4830 	if (ret < 0)
4831 		return ret;
4832 
4833 	/* auto suspend (selective suspend) */
4834 
4835 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4836 	if (ret < 0)
4837 		return ret;
4838 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4839 	if (ret < 0)
4840 		return ret;
4841 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4842 	if (ret < 0)
4843 		return ret;
4844 
4845 	/* set goodframe wakeup */
4846 
4847 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4848 	if (ret < 0)
4849 		return ret;
4850 
4851 	buf |= WUCSR_RFE_WAKE_EN_;
4852 	buf |= WUCSR_STORE_WAKE_;
4853 
4854 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4855 	if (ret < 0)
4856 		return ret;
4857 
4858 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4859 	if (ret < 0)
4860 		return ret;
4861 
4862 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4863 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4864 	buf |= PMT_CTL_PHY_WAKE_EN_;
4865 	buf |= PMT_CTL_WOL_EN_;
4866 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4867 	buf |= PMT_CTL_SUS_MODE_3_;
4868 
4869 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4870 	if (ret < 0)
4871 		return ret;
4872 
4873 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4874 	if (ret < 0)
4875 		return ret;
4876 
4877 	buf |= PMT_CTL_WUPS_MASK_;
4878 
4879 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4880 	if (ret < 0)
4881 		return ret;
4882 
4883 	ret = lan78xx_start_rx_path(dev);
4884 
4885 	return ret;
4886 }
4887 
4888 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4889 {
4890 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4891 	const u8 ipv6_multicast[2] = { 0x33, 0x33 };
4892 	const u8 arp_type[2] = { 0x08, 0x06 };
4893 	u32 temp_pmt_ctl;
4894 	int mask_index;
4895 	u32 temp_wucsr;
4896 	u32 buf;
4897 	u16 crc;
4898 	int ret;
4899 
4900 	ret = lan78xx_stop_tx_path(dev);
4901 	if (ret < 0)
4902 		return ret;
4903 	ret = lan78xx_stop_rx_path(dev);
4904 	if (ret < 0)
4905 		return ret;
4906 
4907 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4908 	if (ret < 0)
4909 		return ret;
4910 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4911 	if (ret < 0)
4912 		return ret;
4913 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4914 	if (ret < 0)
4915 		return ret;
4916 
4917 	temp_wucsr = 0;
4918 
4919 	temp_pmt_ctl = 0;
4920 
4921 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4922 	if (ret < 0)
4923 		return ret;
4924 
4925 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4926 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4927 
4928 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4929 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4930 		if (ret < 0)
4931 			return ret;
4932 	}
4933 
4934 	mask_index = 0;
4935 	if (wol & WAKE_PHY) {
4936 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4937 
4938 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4939 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4940 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4941 	}
4942 	if (wol & WAKE_MAGIC) {
4943 		temp_wucsr |= WUCSR_MPEN_;
4944 
4945 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4946 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4947 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4948 	}
4949 	if (wol & WAKE_BCAST) {
4950 		temp_wucsr |= WUCSR_BCST_EN_;
4951 
4952 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4953 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4954 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4955 	}
4956 	if (wol & WAKE_MCAST) {
4957 		temp_wucsr |= WUCSR_WAKE_EN_;
4958 
4959 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4960 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4961 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4962 					WUF_CFGX_EN_ |
4963 					WUF_CFGX_TYPE_MCAST_ |
4964 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4965 					(crc & WUF_CFGX_CRC16_MASK_));
4966 		if (ret < 0)
4967 			return ret;
4968 
4969 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4970 		if (ret < 0)
4971 			return ret;
4972 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4973 		if (ret < 0)
4974 			return ret;
4975 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4976 		if (ret < 0)
4977 			return ret;
4978 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4979 		if (ret < 0)
4980 			return ret;
4981 
4982 		mask_index++;
4983 
4984 		/* for IPv6 Multicast */
4985 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4986 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4987 					WUF_CFGX_EN_ |
4988 					WUF_CFGX_TYPE_MCAST_ |
4989 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4990 					(crc & WUF_CFGX_CRC16_MASK_));
4991 		if (ret < 0)
4992 			return ret;
4993 
4994 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4995 		if (ret < 0)
4996 			return ret;
4997 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4998 		if (ret < 0)
4999 			return ret;
5000 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
5001 		if (ret < 0)
5002 			return ret;
5003 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
5004 		if (ret < 0)
5005 			return ret;
5006 
5007 		mask_index++;
5008 
5009 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5010 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5011 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5012 	}
5013 	if (wol & WAKE_UCAST) {
5014 		temp_wucsr |= WUCSR_PFDA_EN_;
5015 
5016 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5017 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5018 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5019 	}
5020 	if (wol & WAKE_ARP) {
5021 		temp_wucsr |= WUCSR_WAKE_EN_;
5022 
5023 		/* set WUF_CFG & WUF_MASK
5024 		 * for the packet type (offsets 12-13) = ARP (0x0806)
5025 		 */
5026 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
5027 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
5028 					WUF_CFGX_EN_ |
5029 					WUF_CFGX_TYPE_ALL_ |
5030 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
5031 					(crc & WUF_CFGX_CRC16_MASK_));
5032 		if (ret < 0)
5033 			return ret;
5034 
5035 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
5036 		if (ret < 0)
5037 			return ret;
5038 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
5039 		if (ret < 0)
5040 			return ret;
5041 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
5042 		if (ret < 0)
5043 			return ret;
5044 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
5045 		if (ret < 0)
5046 			return ret;
5047 
5048 		mask_index++;
5049 
5050 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5051 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5052 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5053 	}
5054 
5055 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
5056 	if (ret < 0)
5057 		return ret;
5058 
5059 	/* when multiple WOL bits are set */
5060 	if (hweight_long((unsigned long)wol) > 1) {
5061 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5062 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5063 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5064 	}
5065 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
5066 	if (ret < 0)
5067 		return ret;
5068 
5069 	/* clear WUPS */
5070 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5071 	if (ret < 0)
5072 		return ret;
5073 
5074 	buf |= PMT_CTL_WUPS_MASK_;
5075 
5076 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5077 	if (ret < 0)
5078 		return ret;
5079 
5080 	ret = lan78xx_start_rx_path(dev);
5081 
5082 	return ret;
5083 }
5084 
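/* Worked example (illustrative): WUF_MASK0 is a byte-offset bitmap
 * selecting which frame bytes feed the CRC-16 above. The value 7 (0b111)
 * selects bytes 0-2, matching the 01:00:5e IPv4 multicast OUI; 0x3000
 * (bits 12 and 13) selects bytes 12-13, the EtherType field compared
 * against ARP (0x0806).
 */
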
5085 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
5086 {
5087 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5088 	bool dev_open;
5089 	int ret;
5090 
5091 	mutex_lock(&dev->dev_mutex);
5092 
5093 	netif_dbg(dev, ifdown, dev->net,
5094 		  "suspending: pm event %#x\n", message.event);
5095 
5096 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5097 
5098 	if (dev_open) {
5099 		spin_lock_irq(&dev->txq.lock);
5100 		/* don't autosuspend while transmitting */
5101 		if ((skb_queue_len(&dev->txq) ||
5102 		     skb_queue_len(&dev->txq_pend)) &&
5103 		    PMSG_IS_AUTO(message)) {
5104 			spin_unlock_irq(&dev->txq.lock);
5105 			ret = -EBUSY;
5106 			goto out;
5107 		} else {
5108 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
5109 			spin_unlock_irq(&dev->txq.lock);
5110 		}
5111 
5112 		rtnl_lock();
5113 		phylink_suspend(dev->phylink, false);
5114 		rtnl_unlock();
5115 
5116 		/* stop RX */
5117 		ret = lan78xx_stop_rx_path(dev);
5118 		if (ret < 0)
5119 			goto out;
5120 
5121 		ret = lan78xx_flush_rx_fifo(dev);
5122 		if (ret < 0)
5123 			goto out;
5124 
5125 		/* stop Tx */
5126 		ret = lan78xx_stop_tx_path(dev);
5127 		if (ret < 0)
5128 			goto out;
5129 
5130 		/* empty out the Rx and Tx queues */
5131 		netif_device_detach(dev->net);
5132 		lan78xx_terminate_urbs(dev);
5133 		usb_kill_urb(dev->urb_intr);
5134 
5135 		/* reattach */
5136 		netif_device_attach(dev->net);
5137 
5138 		timer_delete(&dev->stat_monitor);
5139 
5140 		if (PMSG_IS_AUTO(message)) {
5141 			ret = lan78xx_set_auto_suspend(dev);
5142 			if (ret < 0)
5143 				goto out;
5144 		} else {
5145 			struct lan78xx_priv *pdata;
5146 
5147 			pdata = (struct lan78xx_priv *)(dev->data[0]);
5148 			netif_carrier_off(dev->net);
5149 			ret = lan78xx_set_suspend(dev, pdata->wol);
5150 			if (ret < 0)
5151 				goto out;
5152 		}
5153 	} else {
5154 		/* Interface is down; don't allow WOL and PHY
5155 		 * events to wake up the host
5156 		 */
5157 		u32 buf;
5158 
5159 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
5160 
5161 		ret = lan78xx_write_reg(dev, WUCSR, 0);
5162 		if (ret < 0)
5163 			goto out;
5164 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
5165 		if (ret < 0)
5166 			goto out;
5167 
5168 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5169 		if (ret < 0)
5170 			goto out;
5171 
5172 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
5173 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
5174 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
5175 		buf |= PMT_CTL_SUS_MODE_3_;
5176 
5177 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5178 		if (ret < 0)
5179 			goto out;
5180 
5181 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5182 		if (ret < 0)
5183 			goto out;
5184 
5185 		buf |= PMT_CTL_WUPS_MASK_;
5186 
5187 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5188 		if (ret < 0)
5189 			goto out;
5190 	}
5191 
5192 	ret = 0;
5193 out:
5194 	mutex_unlock(&dev->dev_mutex);
5195 
5196 	return ret;
5197 }
5198 
5199 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
5200 {
5201 	bool pipe_halted = false;
5202 	struct urb *urb;
5203 
5204 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5205 		struct sk_buff *skb = urb->context;
5206 		int ret;
5207 
5208 		if (!netif_device_present(dev->net) ||
5209 		    !netif_carrier_ok(dev->net) ||
5210 		    pipe_halted) {
5211 			lan78xx_release_tx_buf(dev, skb);
5212 			continue;
5213 		}
5214 
5215 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5216 
5217 		if (ret == 0) {
5218 			netif_trans_update(dev->net);
5219 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5220 		} else {
5221 			if (ret == -EPIPE) {
5222 				netif_stop_queue(dev->net);
5223 				pipe_halted = true;
5224 			} else if (ret == -ENODEV) {
5225 				netif_device_detach(dev->net);
5226 			}
5227 
5228 			lan78xx_release_tx_buf(dev, skb);
5229 		}
5230 	}
5231 
5232 	return pipe_halted;
5233 }
5234 
5235 static int lan78xx_resume(struct usb_interface *intf)
5236 {
5237 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5238 	bool dev_open;
5239 	int ret;
5240 
5241 	mutex_lock(&dev->dev_mutex);
5242 
5243 	netif_dbg(dev, ifup, dev->net, "resuming device\n");
5244 
5245 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5246 
5247 	if (dev_open) {
5248 		bool pipe_halted = false;
5249 
5250 		ret = lan78xx_flush_tx_fifo(dev);
5251 		if (ret < 0)
5252 			goto out;
5253 
5254 		if (dev->urb_intr) {
5255 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
5256 
5257 			if (ret < 0) {
5258 				if (ret == -ENODEV)
5259 					netif_device_detach(dev->net);
5260 				netdev_warn(dev->net, "Failed to submit intr URB\n");
5261 			}
5262 		}
5263 
5264 		spin_lock_irq(&dev->txq.lock);
5265 
5266 		if (netif_device_present(dev->net)) {
5267 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
5268 
5269 			if (pipe_halted)
5270 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5271 		}
5272 
5273 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5274 
5275 		spin_unlock_irq(&dev->txq.lock);
5276 
5277 		if (!pipe_halted &&
5278 		    netif_device_present(dev->net) &&
5279 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5280 			netif_start_queue(dev->net);
5281 
5282 		ret = lan78xx_start_tx_path(dev);
5283 		if (ret < 0)
5284 			goto out;
5285 
5286 		napi_schedule(&dev->napi);
5287 
5288 		if (!timer_pending(&dev->stat_monitor)) {
5289 			dev->delta = 1;
5290 			mod_timer(&dev->stat_monitor,
5291 				  jiffies + STAT_UPDATE_TIMER);
5292 		}
5293 
5294 	} else {
5295 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5296 	}
5297 
5298 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
5299 	if (ret < 0)
5300 		goto out;
5301 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5302 	if (ret < 0)
5303 		goto out;
5304 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5305 	if (ret < 0)
5306 		goto out;
5307 
5308 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5309 					     WUCSR2_ARP_RCD_ |
5310 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5311 					     WUCSR2_IPV4_TCPSYN_RCD_);
5312 	if (ret < 0)
5313 		goto out;
5314 
5315 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5316 					    WUCSR_EEE_RX_WAKE_ |
5317 					    WUCSR_PFDA_FR_ |
5318 					    WUCSR_RFE_WAKE_FR_ |
5319 					    WUCSR_WUFR_ |
5320 					    WUCSR_MPR_ |
5321 					    WUCSR_BCST_FR_);
5322 	if (ret < 0)
5323 		goto out;
5324 
5325 	ret = 0;
5326 out:
5327 	mutex_unlock(&dev->dev_mutex);
5328 
5329 	return ret;
5330 }
5331 
5332 static int lan78xx_reset_resume(struct usb_interface *intf)
5333 {
5334 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5335 	int ret;
5336 
5337 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device\n");
5338 
5339 	ret = lan78xx_reset(dev);
5340 	if (ret < 0)
5341 		return ret;
5342 
5343 	ret = lan78xx_resume(intf);
5344 	if (ret < 0)
5345 		return ret;
5346 
5347 	rtnl_lock();
5348 	phylink_resume(dev->phylink);
5349 	rtnl_unlock();
5350 
5351 	return 0;
5352 }
5353 
5354 static const struct usb_device_id products[] = {
5355 	{
5356 	/* LAN7800 USB Gigabit Ethernet Device */
5357 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5358 	},
5359 	{
5360 	/* LAN7850 USB Gigabit Ethernet Device */
5361 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5362 	},
5363 	{
5364 	/* LAN7801 USB Gigabit Ethernet Device */
5365 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5366 	},
5367 	{
5368 	/* AT29M2-AF USB Gigabit Ethernet Device */
5369 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5370 	},
5371 	{},
5372 };
5373 MODULE_DEVICE_TABLE(usb, products);
5374 
5375 static struct usb_driver lan78xx_driver = {
5376 	.name			= DRIVER_NAME,
5377 	.id_table		= products,
5378 	.probe			= lan78xx_probe,
5379 	.disconnect		= lan78xx_disconnect,
5380 	.suspend		= lan78xx_suspend,
5381 	.resume			= lan78xx_resume,
5382 	.reset_resume		= lan78xx_reset_resume,
5383 	.supports_autosuspend	= 1,
5384 	.disable_hub_initiated_lpm = 1,
5385 };
5386 
5387 module_usb_driver(lan78xx_driver);
5388 
5389 MODULE_AUTHOR(DRIVER_AUTHOR);
5390 MODULE_DESCRIPTION(DRIVER_DESC);
5391 MODULE_LICENSE("GPL");
5392