xref: /linux/drivers/net/usb/lan78xx.c (revision eed4edda910fe34dfae8c6bfbcf57f4593a54295)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 
50 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52 					 (FLOW_THRESHOLD(off) << 8))
53 
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS			9216
56 #define FLOW_ON_HS			8704
57 
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS			4096
60 #define FLOW_OFF_HS			1024
61 
62 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY		(0x0800)
64 #define MAX_SINGLE_PACKET_SIZE		(9000)
65 #define DEFAULT_TX_CSUM_ENABLE		(true)
66 #define DEFAULT_RX_CSUM_ENABLE		(true)
67 #define DEFAULT_TSO_CSUM_ENABLE		(true)
68 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
69 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
70 #define TX_ALIGNMENT			(4)
71 #define RXW_PADDING			2
72 
73 #define LAN78XX_USB_VENDOR_ID		(0x0424)
74 #define LAN7800_USB_PRODUCT_ID		(0x7800)
75 #define LAN7850_USB_PRODUCT_ID		(0x7850)
76 #define LAN7801_USB_PRODUCT_ID		(0x7801)
77 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
78 #define LAN78XX_OTP_MAGIC		(0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
81 
82 #define	MII_READ			1
83 #define	MII_WRITE			0
84 
85 #define EEPROM_INDICATOR		(0xA5)
86 #define EEPROM_MAC_OFFSET		(0x01)
87 #define MAX_EEPROM_SIZE			512
88 #define OTP_INDICATOR_1			(0xF3)
89 #define OTP_INDICATOR_2			(0xF7)
90 
91 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
92 					 WAKE_MCAST | WAKE_BCAST | \
93 					 WAKE_ARP | WAKE_MAGIC)
94 
95 #define TX_URB_NUM			10
96 #define TX_SS_URB_NUM			TX_URB_NUM
97 #define TX_HS_URB_NUM			TX_URB_NUM
98 #define TX_FS_URB_NUM			TX_URB_NUM
99 
100 /* A single URB buffer must be large enough to hold a complete jumbo packet
101  */
102 #define TX_SS_URB_SIZE			(32 * 1024)
103 #define TX_HS_URB_SIZE			(16 * 1024)
104 #define TX_FS_URB_SIZE			(10 * 1024)
105 
106 #define RX_SS_URB_NUM			30
107 #define RX_HS_URB_NUM			10
108 #define RX_FS_URB_NUM			10
109 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
110 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
111 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
112 
113 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
114 #define SS_BULK_IN_DELAY		0x2000
115 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
116 #define HS_BULK_IN_DELAY		0x2000
117 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
118 #define FS_BULK_IN_DELAY		0x2000
119 
120 #define TX_CMD_LEN			8
121 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
122 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123 
124 #define RX_CMD_LEN			10
125 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
126 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
127 
128 /* USB related defines */
129 #define BULK_IN_PIPE			1
130 #define BULK_OUT_PIPE			2
131 
132 /* default autosuspend delay (mSec)*/
133 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
134 
135 /* statistic update interval (mSec) */
136 #define STAT_UPDATE_TIMER		(1 * 1000)
137 
138 /* time to wait for MAC or FCT to stop (jiffies) */
139 #define HW_DISABLE_TIMEOUT		(HZ / 10)
140 
141 /* time to wait between polling MAC or FCT state (ms) */
142 #define HW_DISABLE_DELAY_MS		1
143 
144 /* defines interrupts from interrupt EP */
145 #define MAX_INT_EP			(32)
146 #define INT_EP_INTEP			(31)
147 #define INT_EP_OTP_WR_DONE		(28)
148 #define INT_EP_EEE_TX_LPI_START		(26)
149 #define INT_EP_EEE_TX_LPI_STOP		(25)
150 #define INT_EP_EEE_RX_LPI		(24)
151 #define INT_EP_MAC_RESET_TIMEOUT	(23)
152 #define INT_EP_RDFO			(22)
153 #define INT_EP_TXE			(21)
154 #define INT_EP_USB_STATUS		(20)
155 #define INT_EP_TX_DIS			(19)
156 #define INT_EP_RX_DIS			(18)
157 #define INT_EP_PHY			(17)
158 #define INT_EP_DP			(16)
159 #define INT_EP_MAC_ERR			(15)
160 #define INT_EP_TDFU			(14)
161 #define INT_EP_TDFO			(13)
162 #define INT_EP_UTX			(12)
163 #define INT_EP_GPIO_11			(11)
164 #define INT_EP_GPIO_10			(10)
165 #define INT_EP_GPIO_9			(9)
166 #define INT_EP_GPIO_8			(8)
167 #define INT_EP_GPIO_7			(7)
168 #define INT_EP_GPIO_6			(6)
169 #define INT_EP_GPIO_5			(5)
170 #define INT_EP_GPIO_4			(4)
171 #define INT_EP_GPIO_3			(3)
172 #define INT_EP_GPIO_2			(2)
173 #define INT_EP_GPIO_1			(1)
174 #define INT_EP_GPIO_0			(0)
175 
/* ethtool statistics strings.
 * Entry order mirrors the field order of struct lan78xx_statstage /
 * struct lan78xx_statstage64 below; keep the three in sync.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
225 
/* Raw hardware statistics block as returned by
 * USB_VENDOR_REQUEST_GET_STATS (see lan78xx_read_stats()).
 * All counters are 32 bits wide and wrap; wraps are detected by
 * lan78xx_check_stat_rollover().  Field order must match
 * lan78xx_gstrings[] and struct lan78xx_statstage64.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
275 
/* 64-bit accumulated view of the hardware counters, maintained by
 * lan78xx_update_stats() from the 32-bit snapshots plus per-counter
 * rollover bookkeeping.  Field order must match struct lan78xx_statstage
 * exactly — the accumulation loop walks both structures by index.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
325 
/* Register addresses captured for a register dump.
 * NOTE(review): the consumer is not visible in this chunk — presumably
 * ethtool get_regs; confirm before reordering or extending.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
347 
348 #define PHY_REG_SIZE (32 * sizeof(u32))
349 
350 struct lan78xx_net;
351 
/* Driver-private state for receive filtering and VLAN handling,
 * reached through lan78xx_net::driver_priv.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;	/* back-pointer to owning device */
	u32 rfe_ctl;			/* cached RFE control value, guarded by rfe_ctl_lock */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast-filter update */
	struct work_struct set_vlan;	  /* deferred VLAN-table update */
	u32 wol;			/* Wake-on-LAN options (WAKE_* bits) */
};
364 
/* Lifecycle states of an skb in flight; stored in skb_data::state. */
enum skb_state {
	illegal = 0,	/* not yet assigned a valid state */
	tx_start,	/* submitted for transmit */
	tx_done,	/* transmit URB completed */
	rx_start,	/* submitted for receive */
	rx_done,	/* receive URB completed */
	rx_cleanup,	/* being torn down after receive */
	unlink_start	/* URB unlink in progress */
};
374 
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this buffer */
	struct lan78xx_net *dev;	/* owning device */
	enum skb_state state;	/* position in the tx/rx lifecycle */
	size_t length;		/* payload length bookkeeping */
	int num_of_packet;	/* packets aggregated in this buffer — presumably for batching; confirm at use sites */
};
382 
/* Context paired with an asynchronous control request.
 * NOTE(review): usage is outside this chunk — presumably deferred/resumed
 * control transfers; confirm against the submit/complete paths.
 */
struct usb_context {
	struct usb_ctrlrequest req;	/* the control setup packet */
	struct lan78xx_net *dev;	/* owning device */
};
387 
388 #define EVENT_TX_HALT			0
389 #define EVENT_RX_HALT			1
390 #define EVENT_RX_MEMORY			2
391 #define EVENT_STS_SPLIT			3
392 #define EVENT_LINK_RESET		4
393 #define EVENT_RX_PAUSED			5
394 #define EVENT_DEV_WAKING		6
395 #define EVENT_DEV_ASLEEP		7
396 #define EVENT_DEV_OPEN			8
397 #define EVENT_STAT_UPDATE		9
398 #define EVENT_DEV_DISCONNECT		10
399 
/* Statistics bookkeeping: raw snapshot, rollover tracking and the
 * accumulated 64-bit totals (see lan78xx_update_stats()).
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* last raw HW snapshot */
	struct lan78xx_statstage	rollover_count;	/* wraps seen per counter */
	struct lan78xx_statstage	rollover_max;	/* per-counter wrap modulus - 1 */
	struct lan78xx_statstage64	curr_stat;	/* accumulated 64-bit totals */
};
407 
/* State for the driver's IRQ domain — presumably used to expose the
 * chip's interrupt sources (INT_EP_*) to the PHY layer; the handlers
 * live outside this chunk.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* virq handed to the PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* cached interrupt-enable mask */
	struct mutex		irq_lock;		/* for irq bus access */
};
416 
/* Per-device state for one LAN78xx USB network adapter. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* struct lan78xx_priv */

	unsigned int		tx_pend_data_len;
	/* pool sizing, set per USB speed (see *_URB_NUM / *_URB_SIZE) */
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	/* rxq_free/txq_free hold preallocated skb+URB pairs
	 * (lan78xx_alloc_buf_pool()); the others track buffers in flight.
	 */
	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;	/* deferred event work (EVENT_* flags) */

	int			msg_enable;

	struct urb		*urb_intr;	/* interrupt-endpoint URB */
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		phy_mutex; /* for phy access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;	/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	/* periodic stats refresh — presumably fires every STAT_UPDATE_TIMER
	 * ms; confirm where the timer is armed.
	 */
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* ID_REV chip ID (e.g. ID_REV_CHIP_ID_7800_) */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;	/* guarded by stats.access_lock */

	struct irq_domain_data	domain_data;
};
479 
480 /* define external phy id */
481 #define	PHY_LAN8835			(0x0007C130)
482 #define	PHY_KSZ9031RNX			(0x00221620)
483 
484 /* use ethtool to change the level for any given device */
485 static int msg_level = -1;
486 module_param(msg_level, int, 0);
487 MODULE_PARM_DESC(msg_level, "Override default message level");
488 
/* Fetch a preallocated buffer from @buf_pool.
 *
 * Return: the dequeued skb, or NULL if the pool is empty.
 *
 * skb_dequeue() already returns NULL for an empty queue and does its own
 * locking, so the previous skb_queue_empty() pre-check was redundant (and
 * a benign check-then-act race against concurrent consumers).
 */
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	return skb_dequeue(buf_pool);
}
496 
497 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
498 				struct sk_buff *buf)
499 {
500 	buf->data = buf->head;
501 	skb_reset_tail_pointer(buf);
502 
503 	buf->len = 0;
504 	buf->data_len = 0;
505 
506 	skb_queue_tail(buf_pool, buf);
507 }
508 
509 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
510 {
511 	struct skb_data *entry;
512 	struct sk_buff *buf;
513 
514 	while (!skb_queue_empty(buf_pool)) {
515 		buf = skb_dequeue(buf_pool);
516 		if (buf) {
517 			entry = (struct skb_data *)buf->cb;
518 			usb_free_urb(entry->urb);
519 			dev_kfree_skb_any(buf);
520 		}
521 	}
522 }
523 
/* Preallocate @n_urbs skb+URB pairs of @urb_size bytes each into @buf_pool.
 *
 * Each skb's control block (cb) is initialised as a struct skb_data that
 * points at its dedicated URB and at @dev.  On any allocation failure the
 * partially filled pool is torn down again.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		/* NOTE(review): alloc_skb() returns linear data, so this
		 * skb_linearize() looks like a defensive no-op — confirm
		 * before removing.
		 */
		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}
567 
/* Take a free receive buffer from the per-device RX pool (NULL if empty). */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}
572 
/* Reset @rx_buf and return it to the per-device RX pool. */
static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}
578 
/* Free every buffer (skb + URB) in the per-device RX pool. */
static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}
583 
/* Populate the RX pool with dev->n_rx_urbs buffers of dev->rx_urb_size.
 * Return: 0 on success, -ENOMEM on failure.
 */
static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
589 
/* Take a free transmit buffer from the per-device TX pool (NULL if empty). */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}
594 
/* Reset @tx_buf and return it to the per-device TX pool. */
static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}
600 
/* Free every buffer (skb + URB) in the per-device TX pool. */
static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}
605 
/* Populate the TX pool with dev->n_tx_urbs buffers of dev->tx_urb_size.
 * Return: 0 on success, -ENOMEM on failure.
 */
static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
611 
612 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
613 {
614 	u32 *buf;
615 	int ret;
616 
617 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
618 		return -ENODEV;
619 
620 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
621 	if (!buf)
622 		return -ENOMEM;
623 
624 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
625 			      USB_VENDOR_REQUEST_READ_REGISTER,
626 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
627 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
628 	if (likely(ret >= 0)) {
629 		le32_to_cpus(buf);
630 		*data = *buf;
631 	} else if (net_ratelimit()) {
632 		netdev_warn(dev->net,
633 			    "Failed to read register index 0x%08x. ret = %d",
634 			    index, ret);
635 	}
636 
637 	kfree(buf);
638 
639 	return ret;
640 }
641 
642 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
643 {
644 	u32 *buf;
645 	int ret;
646 
647 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
648 		return -ENODEV;
649 
650 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
651 	if (!buf)
652 		return -ENOMEM;
653 
654 	*buf = data;
655 	cpu_to_le32s(buf);
656 
657 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
658 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
659 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
660 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
661 	if (unlikely(ret < 0) &&
662 	    net_ratelimit()) {
663 		netdev_warn(dev->net,
664 			    "Failed to write register index 0x%08x. ret = %d",
665 			    index, ret);
666 	}
667 
668 	kfree(buf);
669 
670 	return ret;
671 }
672 
673 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
674 			      u32 data)
675 {
676 	int ret;
677 	u32 buf;
678 
679 	ret = lan78xx_read_reg(dev, reg, &buf);
680 	if (ret < 0)
681 		return ret;
682 
683 	buf &= ~mask;
684 	buf |= (mask & data);
685 
686 	ret = lan78xx_write_reg(dev, reg, buf);
687 	if (ret < 0)
688 		return ret;
689 
690 	return 0;
691 }
692 
693 static int lan78xx_read_stats(struct lan78xx_net *dev,
694 			      struct lan78xx_statstage *data)
695 {
696 	int ret = 0;
697 	int i;
698 	struct lan78xx_statstage *stats;
699 	u32 *src;
700 	u32 *dst;
701 
702 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
703 	if (!stats)
704 		return -ENOMEM;
705 
706 	ret = usb_control_msg(dev->udev,
707 			      usb_rcvctrlpipe(dev->udev, 0),
708 			      USB_VENDOR_REQUEST_GET_STATS,
709 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
710 			      0,
711 			      0,
712 			      (void *)stats,
713 			      sizeof(*stats),
714 			      USB_CTRL_SET_TIMEOUT);
715 	if (likely(ret >= 0)) {
716 		src = (u32 *)stats;
717 		dst = (u32 *)data;
718 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
719 			le32_to_cpus(&src[i]);
720 			dst[i] = src[i];
721 		}
722 	} else {
723 		netdev_warn(dev->net,
724 			    "Failed to read stat ret = %d", ret);
725 	}
726 
727 	kfree(stats);
728 
729 	return ret;
730 }
731 
/* Bump the per-counter rollover count when a fresh 32-bit reading is lower
 * than the previously saved one (i.e. the hardware counter wrapped).
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)

/* Detect wraparound on every hardware counter in @stats against the last
 * snapshot, then save @stats as the new snapshot.  Called with fresh data
 * from lan78xx_read_stats(); the rollover counts feed the 64-bit
 * accumulation in lan78xx_update_stats().
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
791 
/* Pull fresh hardware counters and fold them into the 64-bit totals.
 *
 * For each counter index i:
 *   curr_stat[i] = latest 32-bit reading + rollovers * (rollover_max + 1)
 * This relies on lan78xx_statstage and lan78xx_statstage64 having the same
 * field order.  No-op if the interface cannot be resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	/* Walk the four stat structures as flat word/dword arrays. */
	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* lan78xx_read_stats() returns the positive transfer length on
	 * success; on failure the stale accumulation below still runs with
	 * the previous rollover state.
	 */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
819 
820 /* Loop until the read is completed with timeout called with phy_mutex held */
821 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
822 {
823 	unsigned long start_time = jiffies;
824 	u32 val;
825 	int ret;
826 
827 	do {
828 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
829 		if (unlikely(ret < 0))
830 			return -EIO;
831 
832 		if (!(val & MII_ACC_MII_BUSY_))
833 			return 0;
834 	} while (!time_after(jiffies, start_time + HZ));
835 
836 	return -EIO;
837 }
838 
839 static inline u32 mii_access(int id, int index, int read)
840 {
841 	u32 ret;
842 
843 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
844 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
845 	if (read)
846 		ret |= MII_ACC_MII_READ_;
847 	else
848 		ret |= MII_ACC_MII_WRITE_;
849 	ret |= MII_ACC_MII_BUSY_;
850 
851 	return ret;
852 }
853 
/* Wait (up to one second) for the current EEPROM command to complete.
 *
 * Completion is E2P_CMD_EPC_BUSY_ clearing; E2P_CMD_EPC_TIMEOUT_ set by the
 * hardware also ends the wait and is reported as failure below.
 *
 * NOTE(review): the warning says "read" but lan78xx_write_raw_eeprom() uses
 * this helper for writes as well.
 *
 * Return: 0 on completion, -EIO on register-read failure, hardware timeout
 * or still-busy after the deadline.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
878 
879 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
880 {
881 	unsigned long start_time = jiffies;
882 	u32 val;
883 	int ret;
884 
885 	do {
886 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
887 		if (unlikely(ret < 0))
888 			return -EIO;
889 
890 		if (!(val & E2P_CMD_EPC_BUSY_))
891 			return 0;
892 
893 		usleep_range(40, 100);
894 	} while (!time_after(jiffies, start_time + HZ));
895 
896 	netdev_warn(dev->net, "EEPROM is busy");
897 	return -EIO;
898 }
899 
900 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
901 				   u32 length, u8 *data)
902 {
903 	u32 val;
904 	u32 saved;
905 	int i, ret;
906 	int retval;
907 
908 	/* depends on chip, some EEPROM pins are muxed with LED function.
909 	 * disable & restore LED function to access EEPROM.
910 	 */
911 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
912 	saved = val;
913 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
914 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
915 		ret = lan78xx_write_reg(dev, HW_CFG, val);
916 	}
917 
918 	retval = lan78xx_eeprom_confirm_not_busy(dev);
919 	if (retval)
920 		return retval;
921 
922 	for (i = 0; i < length; i++) {
923 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
924 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
925 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
926 		if (unlikely(ret < 0)) {
927 			retval = -EIO;
928 			goto exit;
929 		}
930 
931 		retval = lan78xx_wait_eeprom(dev);
932 		if (retval < 0)
933 			goto exit;
934 
935 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
936 		if (unlikely(ret < 0)) {
937 			retval = -EIO;
938 			goto exit;
939 		}
940 
941 		data[i] = val & 0xFF;
942 		offset++;
943 	}
944 
945 	retval = 0;
946 exit:
947 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
948 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
949 
950 	return retval;
951 }
952 
953 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
954 			       u32 length, u8 *data)
955 {
956 	u8 sig;
957 	int ret;
958 
959 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
960 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
961 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
962 	else
963 		ret = -EINVAL;
964 
965 	return ret;
966 }
967 
/* Write @length bytes from @data to the raw EEPROM starting at @offset.
 *
 * Sequence: disable LED muxing (LAN7800 only), issue the write/erase-enable
 * (EWEN) command once, then for each byte load E2P_DATA and issue a WRITE
 * command, waiting for completion each time.  The saved HW_CFG value is
 * restored on every exit path.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* Restore the original LED/HW_CFG configuration on all paths. */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
1034 
/* Read @length bytes of raw OTP memory starting at @offset into @data.
 *
 * If the OTP block is powered down (OTP_PWR_DN_PWRDN_N_ set) it is powered
 * up first.  Each byte is fetched by programming the split address
 * registers, issuing a READ command and polling OTP_STATUS until not busy,
 * each poll bounded by a one-second timeout.
 *
 * NOTE(review): the lan78xx_read_reg()/lan78xx_write_reg() return codes are
 * ignored throughout; a failed register access would poll on stale 'buf'
 * contents — confirm whether that is acceptable here.
 *
 * Return: 0 on success, -EIO on poll timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* OTP address is split: high bits in ADDR1, low in ADDR2. */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1087 
/* Program @length bytes from @data into raw OTP memory at @offset.
 *
 * Powers the OTP block up if needed, selects BYTE program mode, then for
 * each byte loads the split address and data registers, issues the command
 * (OTP_TST_CMD_PRGVRFY_ — presumably program-with-verify; confirm with the
 * datasheet) and polls OTP_STATUS with a one-second timeout.
 *
 * NOTE(review): as in lan78xx_read_raw_otp(), register-access return codes
 * are not checked.
 *
 * Return: 0 on success, -EIO on poll timeout.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* OTP address is split: high bits in ADDR1, low in ADDR2. */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1139 
1140 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1141 			    u32 length, u8 *data)
1142 {
1143 	u8 sig;
1144 	int ret;
1145 
1146 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1147 
1148 	if (ret == 0) {
1149 		if (sig == OTP_INDICATOR_2)
1150 			offset += 0x100;
1151 		else if (sig != OTP_INDICATOR_1)
1152 			ret = -EINVAL;
1153 		if (!ret)
1154 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1155 	}
1156 
1157 	return ret;
1158 }
1159 
1160 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1161 {
1162 	int i, ret;
1163 
1164 	for (i = 0; i < 100; i++) {
1165 		u32 dp_sel;
1166 
1167 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1168 		if (unlikely(ret < 0))
1169 			return -EIO;
1170 
1171 		if (dp_sel & DP_SEL_DPRDY_)
1172 			return 0;
1173 
1174 		usleep_range(40, 100);
1175 	}
1176 
1177 	netdev_warn(dev->net, "%s timed out", __func__);
1178 
1179 	return -EIO;
1180 }
1181 
1182 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1183 				  u32 addr, u32 length, u32 *buf)
1184 {
1185 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1186 	u32 dp_sel;
1187 	int i, ret;
1188 
1189 	if (usb_autopm_get_interface(dev->intf) < 0)
1190 		return 0;
1191 
1192 	mutex_lock(&pdata->dataport_mutex);
1193 
1194 	ret = lan78xx_dataport_wait_not_busy(dev);
1195 	if (ret < 0)
1196 		goto done;
1197 
1198 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1199 
1200 	dp_sel &= ~DP_SEL_RSEL_MASK_;
1201 	dp_sel |= ram_select;
1202 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1203 
1204 	for (i = 0; i < length; i++) {
1205 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1206 
1207 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1208 
1209 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1210 
1211 		ret = lan78xx_dataport_wait_not_busy(dev);
1212 		if (ret < 0)
1213 			goto done;
1214 	}
1215 
1216 done:
1217 	mutex_unlock(&pdata->dataport_mutex);
1218 	usb_autopm_put_interface(dev->intf);
1219 
1220 	return ret;
1221 }
1222 
1223 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1224 				    int index, u8 addr[ETH_ALEN])
1225 {
1226 	u32 temp;
1227 
1228 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1229 		temp = addr[3];
1230 		temp = addr[2] | (temp << 8);
1231 		temp = addr[1] | (temp << 8);
1232 		temp = addr[0] | (temp << 8);
1233 		pdata->pfilter_table[index][1] = temp;
1234 		temp = addr[5];
1235 		temp = addr[4] | (temp << 8);
1236 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1237 		pdata->pfilter_table[index][0] = temp;
1238 	}
1239 }
1240 
1241 /* returns hash bit number for given MAC address */
1242 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1243 {
1244 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1245 }
1246 
1247 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1248 {
1249 	struct lan78xx_priv *pdata =
1250 			container_of(param, struct lan78xx_priv, set_multicast);
1251 	struct lan78xx_net *dev = pdata->dev;
1252 	int i;
1253 
1254 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1255 		  pdata->rfe_ctl);
1256 
1257 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1258 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1259 
1260 	for (i = 1; i < NUM_OF_MAF; i++) {
1261 		lan78xx_write_reg(dev, MAF_HI(i), 0);
1262 		lan78xx_write_reg(dev, MAF_LO(i),
1263 				  pdata->pfilter_table[i][1]);
1264 		lan78xx_write_reg(dev, MAF_HI(i),
1265 				  pdata->pfilter_table[i][0]);
1266 	}
1267 
1268 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1269 }
1270 
1271 static void lan78xx_set_multicast(struct net_device *netdev)
1272 {
1273 	struct lan78xx_net *dev = netdev_priv(netdev);
1274 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1275 	unsigned long flags;
1276 	int i;
1277 
1278 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1279 
1280 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1281 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1282 
1283 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1284 		pdata->mchash_table[i] = 0;
1285 
1286 	/* pfilter_table[0] has own HW address */
1287 	for (i = 1; i < NUM_OF_MAF; i++) {
1288 		pdata->pfilter_table[i][0] = 0;
1289 		pdata->pfilter_table[i][1] = 0;
1290 	}
1291 
1292 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1293 
1294 	if (dev->net->flags & IFF_PROMISC) {
1295 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1296 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1297 	} else {
1298 		if (dev->net->flags & IFF_ALLMULTI) {
1299 			netif_dbg(dev, drv, dev->net,
1300 				  "receive all multicast enabled");
1301 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1302 		}
1303 	}
1304 
1305 	if (netdev_mc_count(dev->net)) {
1306 		struct netdev_hw_addr *ha;
1307 		int i;
1308 
1309 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1310 
1311 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1312 
1313 		i = 1;
1314 		netdev_for_each_mc_addr(ha, netdev) {
1315 			/* set first 32 into Perfect Filter */
1316 			if (i < 33) {
1317 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1318 			} else {
1319 				u32 bitnum = lan78xx_hash(ha->addr);
1320 
1321 				pdata->mchash_table[bitnum / 32] |=
1322 							(1 << (bitnum % 32));
1323 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1324 			}
1325 			i++;
1326 		}
1327 	}
1328 
1329 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1330 
1331 	/* defer register writes to a sleepable context */
1332 	schedule_work(&pdata->set_multicast);
1333 }
1334 
1335 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1336 				      u16 lcladv, u16 rmtadv)
1337 {
1338 	u32 flow = 0, fct_flow = 0;
1339 	u8 cap;
1340 
1341 	if (dev->fc_autoneg)
1342 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1343 	else
1344 		cap = dev->fc_request_control;
1345 
1346 	if (cap & FLOW_CTRL_TX)
1347 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1348 
1349 	if (cap & FLOW_CTRL_RX)
1350 		flow |= FLOW_CR_RX_FCEN_;
1351 
1352 	if (dev->udev->speed == USB_SPEED_SUPER)
1353 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1354 	else if (dev->udev->speed == USB_SPEED_HIGH)
1355 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1356 
1357 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1358 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1359 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1360 
1361 	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1362 
1363 	/* threshold value should be set before enabling flow */
1364 	lan78xx_write_reg(dev, FLOW, flow);
1365 
1366 	return 0;
1367 }
1368 
1369 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1370 
/* Reset the MAC by setting the self-clearing MAC_CR.RST bit and polling
 * (for up to 1s) until it drops.  Holds phy_mutex throughout so no MDIO
 * transaction can be in flight during the reset.
 * Returns 0 on success, a register-access error, or -ETIMEDOUT.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}
1416 
/* Handle a PHY link-state transition (run from the EVENT_LINK_RESET
 * deferred work).  On link-down: reset the MAC and stop the statistics
 * timer.  On link-up: tune USB3 U1/U2 power states to the negotiated
 * speed, reprogram flow control from the autoneg results, restart the
 * statistics timer, and kick the RX path.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	/* sample the PHY link state under its lock */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* At gigabit, U2 exit latency is too high: allow only U1.
		 * At lower speeds both U1 and U2 can be enabled.
		 */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		/* resolve pause configuration from both link partners */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		napi_schedule(&dev->napi);
	}

	return 0;
}
1509 
1510 /* some work can't be done in tasklets, so we use keventd
1511  *
1512  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1513  * but tasklet_schedule() doesn't.	hope the failure is rare.
1514  */
/* Flag @work in dev->flags and kick the deferred-work queue.  Safe from
 * atomic context; logs if the work item was already queued and the
 * request may therefore coalesce/drop.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1521 
1522 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1523 {
1524 	u32 intdata;
1525 
1526 	if (urb->actual_length != 4) {
1527 		netdev_warn(dev->net,
1528 			    "unexpected urb length %d", urb->actual_length);
1529 		return;
1530 	}
1531 
1532 	intdata = get_unaligned_le32(urb->transfer_buffer);
1533 
1534 	if (intdata & INT_ENP_PHY_INT) {
1535 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1536 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1537 
1538 		if (dev->domain_data.phyirq > 0)
1539 			generic_handle_irq_safe(dev->domain_data.phyirq);
1540 	} else {
1541 		netdev_warn(dev->net,
1542 			    "unexpected interrupt: 0x%08x\n", intdata);
1543 	}
1544 }
1545 
/* ethtool .get_eeprom_len: report the maximum EEPROM size supported */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1550 
/* ethtool .get_eeprom: read ee->len bytes at ee->offset from the
 * external EEPROM into @data, holding an autopm reference for the
 * duration.  Returns 0 on success or a negative error code.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1569 
1570 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1571 				      struct ethtool_eeprom *ee, u8 *data)
1572 {
1573 	struct lan78xx_net *dev = netdev_priv(netdev);
1574 	int ret;
1575 
1576 	ret = usb_autopm_get_interface(dev->intf);
1577 	if (ret)
1578 		return ret;
1579 
1580 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1581 	 * to load data from EEPROM
1582 	 */
1583 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1584 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1585 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1586 		 (ee->offset == 0) &&
1587 		 (ee->len == 512) &&
1588 		 (data[0] == OTP_INDICATOR_1))
1589 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1590 
1591 	usb_autopm_put_interface(dev->intf);
1592 
1593 	return ret;
1594 }
1595 
1596 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1597 				u8 *data)
1598 {
1599 	if (stringset == ETH_SS_STATS)
1600 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1601 }
1602 
1603 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1604 {
1605 	if (sset == ETH_SS_STATS)
1606 		return ARRAY_SIZE(lan78xx_gstrings);
1607 	else
1608 		return -EOPNOTSUPP;
1609 }
1610 
/* ethtool .get_ethtool_stats: refresh the hardware counters, then copy
 * a consistent snapshot of the current statistics under access_lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1622 
1623 static void lan78xx_get_wol(struct net_device *netdev,
1624 			    struct ethtool_wolinfo *wol)
1625 {
1626 	struct lan78xx_net *dev = netdev_priv(netdev);
1627 	int ret;
1628 	u32 buf;
1629 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1630 
1631 	if (usb_autopm_get_interface(dev->intf) < 0)
1632 		return;
1633 
1634 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1635 	if (unlikely(ret < 0)) {
1636 		wol->supported = 0;
1637 		wol->wolopts = 0;
1638 	} else {
1639 		if (buf & USB_CFG_RMT_WKP_) {
1640 			wol->supported = WAKE_ALL;
1641 			wol->wolopts = pdata->wol;
1642 		} else {
1643 			wol->supported = 0;
1644 			wol->wolopts = 0;
1645 		}
1646 	}
1647 
1648 	usb_autopm_put_interface(dev->intf);
1649 }
1650 
1651 static int lan78xx_set_wol(struct net_device *netdev,
1652 			   struct ethtool_wolinfo *wol)
1653 {
1654 	struct lan78xx_net *dev = netdev_priv(netdev);
1655 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1656 	int ret;
1657 
1658 	ret = usb_autopm_get_interface(dev->intf);
1659 	if (ret < 0)
1660 		return ret;
1661 
1662 	if (wol->wolopts & ~WAKE_ALL)
1663 		return -EINVAL;
1664 
1665 	pdata->wol = wol->wolopts;
1666 
1667 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1668 
1669 	phy_ethtool_set_wol(netdev->phydev, wol);
1670 
1671 	usb_autopm_put_interface(dev->intf);
1672 
1673 	return ret;
1674 }
1675 
1676 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1677 {
1678 	struct lan78xx_net *dev = netdev_priv(net);
1679 	struct phy_device *phydev = net->phydev;
1680 	int ret;
1681 	u32 buf;
1682 
1683 	ret = usb_autopm_get_interface(dev->intf);
1684 	if (ret < 0)
1685 		return ret;
1686 
1687 	ret = phy_ethtool_get_eee(phydev, edata);
1688 	if (ret < 0)
1689 		goto exit;
1690 
1691 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1692 	if (buf & MAC_CR_EEE_EN_) {
1693 		edata->eee_enabled = true;
1694 		edata->tx_lpi_enabled = true;
1695 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1696 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1697 		edata->tx_lpi_timer = buf;
1698 	} else {
1699 		edata->eee_enabled = false;
1700 		edata->eee_active = false;
1701 		edata->tx_lpi_enabled = false;
1702 		edata->tx_lpi_timer = 0;
1703 	}
1704 
1705 	ret = 0;
1706 exit:
1707 	usb_autopm_put_interface(dev->intf);
1708 
1709 	return ret;
1710 }
1711 
1712 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1713 {
1714 	struct lan78xx_net *dev = netdev_priv(net);
1715 	int ret;
1716 	u32 buf;
1717 
1718 	ret = usb_autopm_get_interface(dev->intf);
1719 	if (ret < 0)
1720 		return ret;
1721 
1722 	if (edata->eee_enabled) {
1723 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1724 		buf |= MAC_CR_EEE_EN_;
1725 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1726 
1727 		phy_ethtool_set_eee(net->phydev, edata);
1728 
1729 		buf = (u32)edata->tx_lpi_timer;
1730 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1731 	} else {
1732 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1733 		buf &= ~MAC_CR_EEE_EN_;
1734 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1735 	}
1736 
1737 	usb_autopm_put_interface(dev->intf);
1738 
1739 	return 0;
1740 }
1741 
1742 static u32 lan78xx_get_link(struct net_device *net)
1743 {
1744 	u32 link;
1745 
1746 	mutex_lock(&net->phydev->lock);
1747 	phy_read_status(net->phydev);
1748 	link = net->phydev->link;
1749 	mutex_unlock(&net->phydev->lock);
1750 
1751 	return link;
1752 }
1753 
/* ethtool .get_drvinfo: report driver name and USB bus path */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1762 
/* ethtool .get_msglevel: return the netif message-enable mask */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1769 
/* ethtool .set_msglevel: set the netif message-enable mask */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1776 
1777 static int lan78xx_get_link_ksettings(struct net_device *net,
1778 				      struct ethtool_link_ksettings *cmd)
1779 {
1780 	struct lan78xx_net *dev = netdev_priv(net);
1781 	struct phy_device *phydev = net->phydev;
1782 	int ret;
1783 
1784 	ret = usb_autopm_get_interface(dev->intf);
1785 	if (ret < 0)
1786 		return ret;
1787 
1788 	phy_ethtool_ksettings_get(phydev, cmd);
1789 
1790 	usb_autopm_put_interface(dev->intf);
1791 
1792 	return ret;
1793 }
1794 
1795 static int lan78xx_set_link_ksettings(struct net_device *net,
1796 				      const struct ethtool_link_ksettings *cmd)
1797 {
1798 	struct lan78xx_net *dev = netdev_priv(net);
1799 	struct phy_device *phydev = net->phydev;
1800 	int ret = 0;
1801 	int temp;
1802 
1803 	ret = usb_autopm_get_interface(dev->intf);
1804 	if (ret < 0)
1805 		return ret;
1806 
1807 	/* change speed & duplex */
1808 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1809 
1810 	if (!cmd->base.autoneg) {
1811 		/* force link down */
1812 		temp = phy_read(phydev, MII_BMCR);
1813 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1814 		mdelay(1);
1815 		phy_write(phydev, MII_BMCR, temp);
1816 	}
1817 
1818 	usb_autopm_put_interface(dev->intf);
1819 
1820 	return ret;
1821 }
1822 
1823 static void lan78xx_get_pause(struct net_device *net,
1824 			      struct ethtool_pauseparam *pause)
1825 {
1826 	struct lan78xx_net *dev = netdev_priv(net);
1827 	struct phy_device *phydev = net->phydev;
1828 	struct ethtool_link_ksettings ecmd;
1829 
1830 	phy_ethtool_ksettings_get(phydev, &ecmd);
1831 
1832 	pause->autoneg = dev->fc_autoneg;
1833 
1834 	if (dev->fc_request_control & FLOW_CTRL_TX)
1835 		pause->tx_pause = 1;
1836 
1837 	if (dev->fc_request_control & FLOW_CTRL_RX)
1838 		pause->rx_pause = 1;
1839 }
1840 
1841 static int lan78xx_set_pause(struct net_device *net,
1842 			     struct ethtool_pauseparam *pause)
1843 {
1844 	struct lan78xx_net *dev = netdev_priv(net);
1845 	struct phy_device *phydev = net->phydev;
1846 	struct ethtool_link_ksettings ecmd;
1847 	int ret;
1848 
1849 	phy_ethtool_ksettings_get(phydev, &ecmd);
1850 
1851 	if (pause->autoneg && !ecmd.base.autoneg) {
1852 		ret = -EINVAL;
1853 		goto exit;
1854 	}
1855 
1856 	dev->fc_request_control = 0;
1857 	if (pause->rx_pause)
1858 		dev->fc_request_control |= FLOW_CTRL_RX;
1859 
1860 	if (pause->tx_pause)
1861 		dev->fc_request_control |= FLOW_CTRL_TX;
1862 
1863 	if (ecmd.base.autoneg) {
1864 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
1865 		u32 mii_adv;
1866 
1867 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1868 				   ecmd.link_modes.advertising);
1869 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1870 				   ecmd.link_modes.advertising);
1871 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1872 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
1873 		linkmode_or(ecmd.link_modes.advertising, fc,
1874 			    ecmd.link_modes.advertising);
1875 
1876 		phy_ethtool_ksettings_set(phydev, &ecmd);
1877 	}
1878 
1879 	dev->fc_autoneg = pause->autoneg;
1880 
1881 	ret = 0;
1882 exit:
1883 	return ret;
1884 }
1885 
1886 static int lan78xx_get_regs_len(struct net_device *netdev)
1887 {
1888 	if (!netdev->phydev)
1889 		return (sizeof(lan78xx_regs));
1890 	else
1891 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1892 }
1893 
/* ethtool .get_regs: dump the MAC registers listed in lan78xx_regs,
 * followed (when a PHY is attached) by PHY registers 0..31.  Layout must
 * match lan78xx_get_regs_len().
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	/* i continues from the MAC dump so PHY values append in place */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}
1913 
/* ethtool entry points for LAN78xx network devices */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1938 
1939 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1940 {
1941 	u32 addr_lo, addr_hi;
1942 	u8 addr[6];
1943 
1944 	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1945 	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1946 
1947 	addr[0] = addr_lo & 0xFF;
1948 	addr[1] = (addr_lo >> 8) & 0xFF;
1949 	addr[2] = (addr_lo >> 16) & 0xFF;
1950 	addr[3] = (addr_lo >> 24) & 0xFF;
1951 	addr[4] = addr_hi & 0xFF;
1952 	addr[5] = (addr_hi >> 8) & 0xFF;
1953 
1954 	if (!is_valid_ether_addr(addr)) {
1955 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1956 			/* valid address present in Device Tree */
1957 			netif_dbg(dev, ifup, dev->net,
1958 				  "MAC address read from Device Tree");
1959 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1960 						 ETH_ALEN, addr) == 0) ||
1961 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1962 					      ETH_ALEN, addr) == 0)) &&
1963 			   is_valid_ether_addr(addr)) {
1964 			/* eeprom values are valid so use them */
1965 			netif_dbg(dev, ifup, dev->net,
1966 				  "MAC address read from EEPROM");
1967 		} else {
1968 			/* generate random MAC */
1969 			eth_random_addr(addr);
1970 			netif_dbg(dev, ifup, dev->net,
1971 				  "MAC address set to random addr");
1972 		}
1973 
1974 		addr_lo = addr[0] | (addr[1] << 8) |
1975 			  (addr[2] << 16) | (addr[3] << 24);
1976 		addr_hi = addr[4] | (addr[5] << 8);
1977 
1978 		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1979 		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1980 	}
1981 
1982 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1983 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1984 
1985 	eth_hw_addr_set(dev->net, addr);
1986 }
1987 
1988 /* MDIO read and write wrappers for phylib */
1989 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1990 {
1991 	struct lan78xx_net *dev = bus->priv;
1992 	u32 val, addr;
1993 	int ret;
1994 
1995 	ret = usb_autopm_get_interface(dev->intf);
1996 	if (ret < 0)
1997 		return ret;
1998 
1999 	mutex_lock(&dev->phy_mutex);
2000 
2001 	/* confirm MII not busy */
2002 	ret = lan78xx_phy_wait_not_busy(dev);
2003 	if (ret < 0)
2004 		goto done;
2005 
2006 	/* set the address, index & direction (read from PHY) */
2007 	addr = mii_access(phy_id, idx, MII_READ);
2008 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2009 
2010 	ret = lan78xx_phy_wait_not_busy(dev);
2011 	if (ret < 0)
2012 		goto done;
2013 
2014 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2015 
2016 	ret = (int)(val & 0xFFFF);
2017 
2018 done:
2019 	mutex_unlock(&dev->phy_mutex);
2020 	usb_autopm_put_interface(dev->intf);
2021 
2022 	return ret;
2023 }
2024 
2025 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2026 				 u16 regval)
2027 {
2028 	struct lan78xx_net *dev = bus->priv;
2029 	u32 val, addr;
2030 	int ret;
2031 
2032 	ret = usb_autopm_get_interface(dev->intf);
2033 	if (ret < 0)
2034 		return ret;
2035 
2036 	mutex_lock(&dev->phy_mutex);
2037 
2038 	/* confirm MII not busy */
2039 	ret = lan78xx_phy_wait_not_busy(dev);
2040 	if (ret < 0)
2041 		goto done;
2042 
2043 	val = (u32)regval;
2044 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2045 
2046 	/* set the address, index & direction (write to PHY) */
2047 	addr = mii_access(phy_id, idx, MII_WRITE);
2048 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2049 
2050 	ret = lan78xx_phy_wait_not_busy(dev);
2051 	if (ret < 0)
2052 		goto done;
2053 
2054 done:
2055 	mutex_unlock(&dev->phy_mutex);
2056 	usb_autopm_put_interface(dev->intf);
2057 	return 0;
2058 }
2059 
/* Allocate and register the MDIO bus used to reach the (internal or
 * external) PHY, optionally bound to the "mdio" device-tree child node.
 * Returns 0 on success or a negative error code; on failure the bus
 * allocation is released.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* bus id derived from USB bus/device number for uniqueness */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
2106 
/* Tear down the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2112 
/* phylib link-change callback: log the new link state */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;

	phy_print_status(phydev);
}
2119 
/* irq_domain .map: associate a newly created virtual irq with our
 * irqchip, default handler and per-domain data
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
2131 
/* irq_domain .unmap: detach the irqchip and data set up in irq_map() */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2137 
/* irq-domain callbacks for the device's interrupt-endpoint sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2142 
/* irqchip .irq_mask: clear the hwirq bit in the shadow enable mask; the
 * INT_EP_CTL register is updated later in irq_bus_sync_unlock()
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
2149 
/* irqchip .irq_unmask: set the hwirq bit in the shadow enable mask; the
 * INT_EP_CTL register is updated later in irq_bus_sync_unlock()
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
2156 
/* irqchip .irq_bus_lock: serialize slow-bus (USB) irq updates */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2163 
/* irqchip .irq_bus_sync_unlock: flush the shadow enable mask to
 * INT_EP_CTL (only when it changed) and release the bus lock
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
2180 
/* irqchip backing the device's interrupt-endpoint irq domain; mask
 * changes are cached and flushed on bus_sync_unlock
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2188 
/* Create an irq domain for the device's interrupt-endpoint sources and
 * map a virtual irq for the PHY interrupt (INT_EP_PHY) so phylib can use
 * it.  The shadow enable mask is seeded from the current INT_EP_CTL.
 * Returns 0 on success or -EINVAL if the domain or mapping could not be
 * created.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the shadow mask from the hardware state */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2227 
2228 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2229 {
2230 	if (dev->domain_data.phyirq > 0) {
2231 		irq_dispose_mapping(dev->domain_data.phyirq);
2232 
2233 		if (dev->domain_data.irqdomain)
2234 			irq_domain_remove(dev->domain_data.irqdomain);
2235 	}
2236 	dev->domain_data.phyirq = 0;
2237 	dev->domain_data.irqdomain = NULL;
2238 }
2239 
2240 static int lan8835_fixup(struct phy_device *phydev)
2241 {
2242 	int buf;
2243 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2244 
2245 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2246 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2247 	buf &= ~0x1800;
2248 	buf |= 0x0800;
2249 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2250 
2251 	/* RGMII MAC TXC Delay Enable */
2252 	lan78xx_write_reg(dev, MAC_RGMII_ID,
2253 			  MAC_RGMII_ID_TXC_DELAY_EN_);
2254 
2255 	/* RGMII TX DLL Tune Adjust */
2256 	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2257 
2258 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2259 
2260 	return 1;
2261 }
2262 
/* PHY fixup for the Micrel KSZ9031RNX: program RGMII pad-skew registers
 * so the MAC/PHY timing lines up, then record the resulting interface
 * mode (RX internal delay).  Returns 1 so phylib treats the fixup as
 * applied.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2279 
2280 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2281 {
2282 	u32 buf;
2283 	int ret;
2284 	struct fixed_phy_status fphy_status = {
2285 		.link = 1,
2286 		.speed = SPEED_1000,
2287 		.duplex = DUPLEX_FULL,
2288 	};
2289 	struct phy_device *phydev;
2290 
2291 	phydev = phy_find_first(dev->mdiobus);
2292 	if (!phydev) {
2293 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2294 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2295 		if (IS_ERR(phydev)) {
2296 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2297 			return NULL;
2298 		}
2299 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2300 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2301 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2302 					MAC_RGMII_ID_TXC_DELAY_EN_);
2303 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2304 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2305 		buf |= HW_CFG_CLK125_EN_;
2306 		buf |= HW_CFG_REFCLK25_EN_;
2307 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2308 	} else {
2309 		if (!phydev->drv) {
2310 			netdev_err(dev->net, "no PHY driver found\n");
2311 			return NULL;
2312 		}
2313 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2314 		/* external PHY fixup for KSZ9031RNX */
2315 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2316 						 ksz9031rnx_fixup);
2317 		if (ret < 0) {
2318 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2319 			return NULL;
2320 		}
2321 		/* external PHY fixup for LAN8835 */
2322 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2323 						 lan8835_fixup);
2324 		if (ret < 0) {
2325 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2326 			return NULL;
2327 		}
2328 		/* add more external PHY fixup here if needed */
2329 
2330 		phydev->is_internal = false;
2331 	}
2332 	return phydev;
2333 }
2334 
/* Locate and attach the PHY for the detected chip, configure interrupt
 * vs. polling mode, flow-control advertisement and (optionally) the LED
 * enables from device tree.  Returns 0 on success or -EIO on any failure.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	/* Chip-specific PHY discovery: LAN7801 has no internal PHY and may
	 * fall back to a fixed link; LAN7800/7850 use the internal GMII PHY.
	 */
	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* Undo whatever lan7801_phy_init() registered: either the
		 * fixed PHY or the external-PHY fixups.
		 */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		/* Enable one LED per entry in the DT "microchip,led-modes"
		 * property (0..4 entries); the (len > N) products evaluate
		 * to 0 or 1, selecting each LEDn enable bit.
		 */
		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	/* Remember whether flow control follows autoneg for ethtool */
	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2437 
2438 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2439 {
2440 	u32 buf;
2441 	bool rxenabled;
2442 
2443 	lan78xx_read_reg(dev, MAC_RX, &buf);
2444 
2445 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2446 
2447 	if (rxenabled) {
2448 		buf &= ~MAC_RX_RXEN_;
2449 		lan78xx_write_reg(dev, MAC_RX, buf);
2450 	}
2451 
2452 	/* add 4 to size for FCS */
2453 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2454 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2455 
2456 	lan78xx_write_reg(dev, MAC_RX, buf);
2457 
2458 	if (rxenabled) {
2459 		buf |= MAC_RX_RXEN_;
2460 		lan78xx_write_reg(dev, MAC_RX, buf);
2461 	}
2462 
2463 	return 0;
2464 }
2465 
/* Asynchronously unlink every in-flight URB on queue q, returning the
 * number of successfully started unlinks.  The queue lock must be
 * dropped around usb_unlink_urb() (it may complete synchronously and
 * the completion handler takes the same lock), so the walk restarts
 * from the head after every unlink.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* Find the next entry not already being unlinked; entries
		 * are tagged unlink_start so they are visited only once.
		 */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2510 
2511 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2512 {
2513 	struct lan78xx_net *dev = netdev_priv(netdev);
2514 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2515 	int ret;
2516 
2517 	/* no second zero-length packet read wanted after mtu-sized packets */
2518 	if ((max_frame_len % dev->maxpacket) == 0)
2519 		return -EDOM;
2520 
2521 	ret = usb_autopm_get_interface(dev->intf);
2522 	if (ret < 0)
2523 		return ret;
2524 
2525 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2526 	if (!ret)
2527 		netdev->mtu = new_mtu;
2528 
2529 	usb_autopm_put_interface(dev->intf);
2530 
2531 	return ret;
2532 }
2533 
2534 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2535 {
2536 	struct lan78xx_net *dev = netdev_priv(netdev);
2537 	struct sockaddr *addr = p;
2538 	u32 addr_lo, addr_hi;
2539 
2540 	if (netif_running(netdev))
2541 		return -EBUSY;
2542 
2543 	if (!is_valid_ether_addr(addr->sa_data))
2544 		return -EADDRNOTAVAIL;
2545 
2546 	eth_hw_addr_set(netdev, addr->sa_data);
2547 
2548 	addr_lo = netdev->dev_addr[0] |
2549 		  netdev->dev_addr[1] << 8 |
2550 		  netdev->dev_addr[2] << 16 |
2551 		  netdev->dev_addr[3] << 24;
2552 	addr_hi = netdev->dev_addr[4] |
2553 		  netdev->dev_addr[5] << 8;
2554 
2555 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2556 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2557 
2558 	/* Added to support MAC address changes */
2559 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2560 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2561 
2562 	return 0;
2563 }
2564 
2565 /* Enable or disable Rx checksum offload engine */
2566 static int lan78xx_set_features(struct net_device *netdev,
2567 				netdev_features_t features)
2568 {
2569 	struct lan78xx_net *dev = netdev_priv(netdev);
2570 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2571 	unsigned long flags;
2572 
2573 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2574 
2575 	if (features & NETIF_F_RXCSUM) {
2576 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2577 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2578 	} else {
2579 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2580 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2581 	}
2582 
2583 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2584 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2585 	else
2586 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2587 
2588 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2589 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2590 	else
2591 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2592 
2593 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2594 
2595 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2596 
2597 	return 0;
2598 }
2599 
2600 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2601 {
2602 	struct lan78xx_priv *pdata =
2603 			container_of(param, struct lan78xx_priv, set_vlan);
2604 	struct lan78xx_net *dev = pdata->dev;
2605 
2606 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2607 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2608 }
2609 
2610 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2611 				   __be16 proto, u16 vid)
2612 {
2613 	struct lan78xx_net *dev = netdev_priv(netdev);
2614 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2615 	u16 vid_bit_index;
2616 	u16 vid_dword_index;
2617 
2618 	vid_dword_index = (vid >> 5) & 0x7F;
2619 	vid_bit_index = vid & 0x1F;
2620 
2621 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2622 
2623 	/* defer register writes to a sleepable context */
2624 	schedule_work(&pdata->set_vlan);
2625 
2626 	return 0;
2627 }
2628 
2629 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2630 				    __be16 proto, u16 vid)
2631 {
2632 	struct lan78xx_net *dev = netdev_priv(netdev);
2633 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2634 	u16 vid_bit_index;
2635 	u16 vid_dword_index;
2636 
2637 	vid_dword_index = (vid >> 5) & 0x7F;
2638 	vid_bit_index = vid & 0x1F;
2639 
2640 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2641 
2642 	/* defer register writes to a sleepable context */
2643 	schedule_work(&pdata->set_vlan);
2644 
2645 	return 0;
2646 }
2647 
2648 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2649 {
2650 	int ret;
2651 	u32 buf;
2652 	u32 regs[6] = { 0 };
2653 
2654 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2655 	if (buf & USB_CFG1_LTM_ENABLE_) {
2656 		u8 temp[2];
2657 		/* Get values from EEPROM first */
2658 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2659 			if (temp[0] == 24) {
2660 				ret = lan78xx_read_raw_eeprom(dev,
2661 							      temp[1] * 2,
2662 							      24,
2663 							      (u8 *)regs);
2664 				if (ret < 0)
2665 					return;
2666 			}
2667 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2668 			if (temp[0] == 24) {
2669 				ret = lan78xx_read_raw_otp(dev,
2670 							   temp[1] * 2,
2671 							   24,
2672 							   (u8 *)regs);
2673 				if (ret < 0)
2674 					return;
2675 			}
2676 		}
2677 	}
2678 
2679 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2680 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2681 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2682 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2683 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2684 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2685 }
2686 
2687 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2688 {
2689 	int result = 0;
2690 
2691 	switch (dev->udev->speed) {
2692 	case USB_SPEED_SUPER:
2693 		dev->rx_urb_size = RX_SS_URB_SIZE;
2694 		dev->tx_urb_size = TX_SS_URB_SIZE;
2695 		dev->n_rx_urbs = RX_SS_URB_NUM;
2696 		dev->n_tx_urbs = TX_SS_URB_NUM;
2697 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2698 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2699 		break;
2700 	case USB_SPEED_HIGH:
2701 		dev->rx_urb_size = RX_HS_URB_SIZE;
2702 		dev->tx_urb_size = TX_HS_URB_SIZE;
2703 		dev->n_rx_urbs = RX_HS_URB_NUM;
2704 		dev->n_tx_urbs = TX_HS_URB_NUM;
2705 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2706 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2707 		break;
2708 	case USB_SPEED_FULL:
2709 		dev->rx_urb_size = RX_FS_URB_SIZE;
2710 		dev->tx_urb_size = TX_FS_URB_SIZE;
2711 		dev->n_rx_urbs = RX_FS_URB_NUM;
2712 		dev->n_tx_urbs = TX_FS_URB_NUM;
2713 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2714 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2715 		break;
2716 	default:
2717 		netdev_warn(dev->net, "USB bus speed not supported\n");
2718 		result = -EIO;
2719 		break;
2720 	}
2721 
2722 	return result;
2723 }
2724 
/* Set the enable bit(s) for a MAC/FIFO hardware block via read-modify-write.
 * Returns 0 on success or a negative register-access error.
 */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
2729 
/* Clear the enable bit(s) of a MAC/FIFO hardware block and poll until the
 * hardware reports the corresponding disabled status bit(s), giving up
 * after HW_DISABLE_TIMEOUT.  Returns 0 on success, -ETIME on timeout, or
 * a negative register-access error.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		/* Poll for the disabled status, sleeping between reads to
		 * give the hardware time to quiesce.
		 */
		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do  {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	ret = stopped ? 0 : -ETIME;

	return ret;
}
2769 
/* Assert the flush/reset bit(s) of a FIFO control register.  The caller
 * must ensure the corresponding data path is stopped first.
 */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
2774 
2775 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2776 {
2777 	int ret;
2778 
2779 	netif_dbg(dev, drv, dev->net, "start tx path");
2780 
2781 	/* Start the MAC transmitter */
2782 
2783 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2784 	if (ret < 0)
2785 		return ret;
2786 
2787 	/* Start the Tx FIFO */
2788 
2789 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2790 	if (ret < 0)
2791 		return ret;
2792 
2793 	return 0;
2794 }
2795 
2796 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2797 {
2798 	int ret;
2799 
2800 	netif_dbg(dev, drv, dev->net, "stop tx path");
2801 
2802 	/* Stop the Tx FIFO */
2803 
2804 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2805 	if (ret < 0)
2806 		return ret;
2807 
2808 	/* Stop the MAC transmitter */
2809 
2810 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2811 	if (ret < 0)
2812 		return ret;
2813 
2814 	return 0;
2815 }
2816 
/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	/* Pulse the Tx FIFO reset bit to discard any buffered frames */
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
2824 
2825 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2826 {
2827 	int ret;
2828 
2829 	netif_dbg(dev, drv, dev->net, "start rx path");
2830 
2831 	/* Start the Rx FIFO */
2832 
2833 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2834 	if (ret < 0)
2835 		return ret;
2836 
2837 	/* Start the MAC receiver*/
2838 
2839 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2840 	if (ret < 0)
2841 		return ret;
2842 
2843 	return 0;
2844 }
2845 
2846 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2847 {
2848 	int ret;
2849 
2850 	netif_dbg(dev, drv, dev->net, "stop rx path");
2851 
2852 	/* Stop the MAC receiver */
2853 
2854 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2855 	if (ret < 0)
2856 		return ret;
2857 
2858 	/* Stop the Rx FIFO */
2859 
2860 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2861 	if (ret < 0)
2862 		return ret;
2863 
2864 	return 0;
2865 }
2866 
/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	/* Pulse the Rx FIFO reset bit to discard any buffered frames */
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
2874 
/* Perform a "lite" chip reset and re-initialise the device: MAC address,
 * USB/bulk parameters, LTM, FIFO sizes, flow control, Rx filtering,
 * checksum offload, multicast, PHY reset and max frame length.  The
 * ordering of register writes follows the hardware bring-up sequence and
 * must not be rearranged.  Returns 0 on success or a negative error.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	/* Trigger the lite reset and poll (1 ms granularity, 1 s timeout)
	 * until the hardware clears the LRST bit.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* Burst/delay values were chosen for the bus speed by
	 * lan78xx_urb_config_init().
	 */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_MEF_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* Clear any stale interrupt status and disable flow control until
	 * link negotiation establishes the proper settings.
	 */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Wait for the PHY reset to self-clear AND the READY bit to assert */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3053 
/* Initialise the statistics rollover thresholds and kick off the first
 * update.  Most hardware counters are 20 bits wide (max 0xFFFFF); the
 * byte counters and EEE LPI counters are full 32-bit and are overridden
 * below.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	/* NOTE(review): treats rollover_max as a flat array of u32 fields;
	 * assumes every member of the struct is a u32 — confirm against the
	 * struct definition if it ever changes.
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	set_bit(EVENT_STAT_UPDATE, &dev->flags);
}
3079 
/* ndo_open: resume the interface, start the PHY, submit the interrupt
 * URB for link events, flush and start both data paths, then enable the
 * queue and NAPI.  Returns 0 on success or the first error encountered
 * (partial bring-up is not unwound here beyond the usual stop path).
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	/* Discard anything left in the FIFOs before enabling the paths */
	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* Assume no link until the deferred link-reset work reports it */
	dev->link_on = false;

	napi_enable(&dev->napi);

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	/* ret is 0 here on the success path (last start call succeeded) */
	usb_autopm_put_interface(dev->intf);

	return ret;
}
3139 
/* Cancel all in-flight Rx/Tx URBs and wait until both queues drain.
 * dev->wait is published so the completion path can wake this waiter;
 * the remaining done/overflow/pending queues are then emptied.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		/* Sleep briefly, then re-arm the task state before
		 * re-checking; completions wake us via dev->wait.
		 */
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3175 
/* ndo_stop: quiesce the interface — stop the stat timer, NAPI and queue,
 * terminate all URBs, stop both data paths and the PHY, then neutralise
 * any deferred work before releasing the autopm reference taken in
 * lan78xx_open().  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3224 
/* Move skb from its current queue to rxq_done, updating its state, and
 * schedule NAPI when the done-queue transitions from empty.  Returns the
 * skb's previous state so the caller can detect unlink races.
 *
 * Locking: interrupts are disabled by the irqsave on list->lock and stay
 * disabled while the lock is handed over to rxq_done.lock; the saved
 * flags are restored only at the final unlock.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	/* Only the first enqueue needs to kick NAPI; it drains the rest */
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3248 
3249 static void tx_complete(struct urb *urb)
3250 {
3251 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3252 	struct skb_data *entry = (struct skb_data *)skb->cb;
3253 	struct lan78xx_net *dev = entry->dev;
3254 
3255 	if (urb->status == 0) {
3256 		dev->net->stats.tx_packets += entry->num_of_packet;
3257 		dev->net->stats.tx_bytes += entry->length;
3258 	} else {
3259 		dev->net->stats.tx_errors += entry->num_of_packet;
3260 
3261 		switch (urb->status) {
3262 		case -EPIPE:
3263 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3264 			break;
3265 
3266 		/* software-driven interface shutdown */
3267 		case -ECONNRESET:
3268 		case -ESHUTDOWN:
3269 			netif_dbg(dev, tx_err, dev->net,
3270 				  "tx err interface gone %d\n",
3271 				  entry->urb->status);
3272 			break;
3273 
3274 		case -EPROTO:
3275 		case -ETIME:
3276 		case -EILSEQ:
3277 			netif_stop_queue(dev->net);
3278 			netif_dbg(dev, tx_err, dev->net,
3279 				  "tx err queue stopped %d\n",
3280 				  entry->urb->status);
3281 			break;
3282 		default:
3283 			netif_dbg(dev, tx_err, dev->net,
3284 				  "unknown tx err %d\n",
3285 				  entry->urb->status);
3286 			break;
3287 		}
3288 	}
3289 
3290 	usb_autopm_put_interface_async(dev->intf);
3291 
3292 	skb_unlink(skb, &dev->txq);
3293 
3294 	lan78xx_release_tx_buf(dev, skb);
3295 
3296 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3297 	 */
3298 	if (skb_queue_empty(&dev->txq) &&
3299 	    !skb_queue_empty(&dev->txq_pend))
3300 		napi_schedule(&dev->napi);
3301 }
3302 
3303 static void lan78xx_queue_skb(struct sk_buff_head *list,
3304 			      struct sk_buff *newsk, enum skb_state state)
3305 {
3306 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3307 
3308 	__skb_queue_tail(list, newsk);
3309 	entry->state = state;
3310 }
3311 
3312 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3313 {
3314 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3315 }
3316 
3317 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3318 {
3319 	return dev->tx_pend_data_len;
3320 }
3321 
3322 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3323 				    struct sk_buff *skb,
3324 				    unsigned int *tx_pend_data_len)
3325 {
3326 	unsigned long flags;
3327 
3328 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3329 
3330 	__skb_queue_tail(&dev->txq_pend, skb);
3331 
3332 	dev->tx_pend_data_len += skb->len;
3333 	*tx_pend_data_len = dev->tx_pend_data_len;
3334 
3335 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3336 }
3337 
3338 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3339 					 struct sk_buff *skb,
3340 					 unsigned int *tx_pend_data_len)
3341 {
3342 	unsigned long flags;
3343 
3344 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3345 
3346 	__skb_queue_head(&dev->txq_pend, skb);
3347 
3348 	dev->tx_pend_data_len += skb->len;
3349 	*tx_pend_data_len = dev->tx_pend_data_len;
3350 
3351 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3352 }
3353 
3354 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3355 				    struct sk_buff **skb,
3356 				    unsigned int *tx_pend_data_len)
3357 {
3358 	unsigned long flags;
3359 
3360 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3361 
3362 	*skb = __skb_dequeue(&dev->txq_pend);
3363 	if (*skb)
3364 		dev->tx_pend_data_len -= (*skb)->len;
3365 	*tx_pend_data_len = dev->tx_pend_data_len;
3366 
3367 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3368 }
3369 
/* ndo_start_xmit handler: place @skb on the Tx pending queue and let the
 * NAPI poller (lan78xx_tx_bh) pack it into a bulk-out URB.
 *
 * Always returns NETDEV_TX_OK; back-pressure is applied by stopping the
 * stack queue once the pending bytes exceed the free URB buffer space.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* device autosuspended: kick the deferred work immediately */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3405 
3406 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3407 {
3408 	struct lan78xx_priv *pdata = NULL;
3409 	int ret;
3410 	int i;
3411 
3412 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3413 
3414 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3415 	if (!pdata) {
3416 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3417 		return -ENOMEM;
3418 	}
3419 
3420 	pdata->dev = dev;
3421 
3422 	spin_lock_init(&pdata->rfe_ctl_lock);
3423 	mutex_init(&pdata->dataport_mutex);
3424 
3425 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3426 
3427 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3428 		pdata->vlan_table[i] = 0;
3429 
3430 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3431 
3432 	dev->net->features = 0;
3433 
3434 	if (DEFAULT_TX_CSUM_ENABLE)
3435 		dev->net->features |= NETIF_F_HW_CSUM;
3436 
3437 	if (DEFAULT_RX_CSUM_ENABLE)
3438 		dev->net->features |= NETIF_F_RXCSUM;
3439 
3440 	if (DEFAULT_TSO_CSUM_ENABLE)
3441 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3442 
3443 	if (DEFAULT_VLAN_RX_OFFLOAD)
3444 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3445 
3446 	if (DEFAULT_VLAN_FILTER_ENABLE)
3447 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3448 
3449 	dev->net->hw_features = dev->net->features;
3450 
3451 	ret = lan78xx_setup_irq_domain(dev);
3452 	if (ret < 0) {
3453 		netdev_warn(dev->net,
3454 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3455 		goto out1;
3456 	}
3457 
3458 	/* Init all registers */
3459 	ret = lan78xx_reset(dev);
3460 	if (ret) {
3461 		netdev_warn(dev->net, "Registers INIT FAILED....");
3462 		goto out2;
3463 	}
3464 
3465 	ret = lan78xx_mdio_init(dev);
3466 	if (ret) {
3467 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3468 		goto out2;
3469 	}
3470 
3471 	dev->net->flags |= IFF_MULTICAST;
3472 
3473 	pdata->wol = WAKE_MAGIC;
3474 
3475 	return ret;
3476 
3477 out2:
3478 	lan78xx_remove_irq_domain(dev);
3479 
3480 out1:
3481 	netdev_warn(dev->net, "Bind routine FAILED");
3482 	cancel_work_sync(&pdata->set_multicast);
3483 	cancel_work_sync(&pdata->set_vlan);
3484 	kfree(pdata);
3485 	return ret;
3486 }
3487 
/* Driver unbind: tear down the IRQ domain and MDIO bus, then flush any
 * deferred multicast/VLAN work before freeing the private data allocated
 * in lan78xx_bind() (kept in dev->data[0]).
 */
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	lan78xx_remove_irq_domain(dev);

	lan78xx_remove_mdio(dev);

	if (pdata) {
		/* the work items reference pdata; sync before freeing it */
		cancel_work_sync(&pdata->set_multicast);
		cancel_work_sync(&pdata->set_vlan);
		netif_dbg(dev, ifdown, dev->net, "free pdata");
		kfree(pdata);
		pdata = NULL;
		dev->data[0] = 0;
	}
}
3505 
/* Apply the hardware-computed Rx checksum from the RX command words to
 * @skb, or fall back to software checksumming (CHECKSUM_NONE) when the
 * offload result cannot be trusted.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* checksum is carried in the upper 16 bits of rx_cmd_b */
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3523 
3524 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3525 				    struct sk_buff *skb,
3526 				    u32 rx_cmd_a, u32 rx_cmd_b)
3527 {
3528 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3529 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3530 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3531 				       (rx_cmd_b & 0xffff));
3532 }
3533 
/* Hand a fully-built receive frame to the network stack via GRO,
 * updating Rx statistics. If the stack defers the SKB for hardware
 * timestamping it takes ownership and we must not deliver it here.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* cb held skb_data bookkeeping; clear it before stack delivery */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}
3550 
/* Parse one completed Rx URB buffer (@skb) which may contain several
 * frames, each prefixed by the RX command words (rx_cmd_a/b/c) and padded
 * to 4-byte alignment. Valid frames are copied into fresh NAPI SKBs and
 * delivered (or queued to rxq_overflow once @budget is exhausted).
 *
 * Returns 1 when the buffer was processed, 0 on a malformed buffer or
 * allocation failure (caller counts it as an rx_error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		/* peel off the three little-endian RX command words */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* length claimed by the header must fit in what remains */
		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error; skip the frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			/* strip the trailing FCS from the delivered frame */
			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3636 
3637 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3638 			      int budget, int *work_done)
3639 {
3640 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3641 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3642 		dev->net->stats.rx_errors++;
3643 	}
3644 }
3645 
/* Completion handler for bulk-in (Rx) URBs; runs in interrupt context.
 *
 * Classifies the URB status into rx_done (deliver to NAPI) or rx_cleanup
 * (discard/recycle), updates error statistics, and hands the buffer to
 * defer_bh() which queues it on rxq_done and schedules NAPI.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	skb_put(skb, urb->actual_length);
	state = rx_done;

	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		/* success, but runt transfers are counted as length errors */
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* stalled endpoint: recover from process context */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* NOTE(review): defer_bh()'s return value is stored but never
	 * read after this point.
	 */
	state = defer_bh(dev, skb, &dev->rxq, state);
}
3705 
/* Submit one Rx URB (whose buffer is @skb, pre-sized to rx_urb_size).
 *
 * The device-state checks and the submission are done under rxq.lock so
 * the URB cannot race with a concurrent halt/suspend transition. On any
 * failure the buffer is returned to the free pool and a negative errno
 * is returned (-ENOLINK when the interface is down or unreachable).
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			/* in flight: track it on rxq until completion */
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
3757 
3758 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3759 {
3760 	struct sk_buff *rx_buf;
3761 
3762 	/* Ensure the maximum number of Rx URBs is submitted
3763 	 */
3764 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3765 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3766 			break;
3767 	}
3768 }
3769 
/* Recycle a processed Rx buffer: rewind its data/tail pointers to expose
 * the full rx_urb_size again, then resubmit the URB. The data pointer
 * must be restored before resetting the tail (tail is derived from data).
 */
static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
				    struct sk_buff *rx_buf)
{
	/* reset SKB data pointers */

	rx_buf->data = rx_buf->head;
	skb_reset_tail_pointer(rx_buf);
	rx_buf->len = 0;
	rx_buf->data_len = 0;

	rx_submit(dev, rx_buf, GFP_ATOMIC);
}
3782 
/* Write the two little-endian TX command words for @skb into @buffer
 * (8 bytes): frame length plus FCS insertion, checksum-offload, LSO/MSS
 * and VLAN-tag-insertion flags as applicable.
 */
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
{
	u32 tx_cmd_a;
	u32 tx_cmd_b;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	/* request HW IP/TCP checksum insertion for partial checksums */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp MSS to the device minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	put_unaligned_le32(tx_cmd_a, buffer);
	put_unaligned_le32(tx_cmd_b, buffer + 4);
}
3810 
/* Pack as many pending Tx SKBs as fit into the URB buffer @tx_buf.
 *
 * Each frame is laid out as: alignment padding, 8-byte TX command words,
 * then the frame data. An SKB that does not fit is pushed back to the
 * head of the pending queue. Returns @tx_buf's skb_data with
 * num_of_packet/length describing what was packed.
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* pad so the command words start on a TX_ALIGNMENT boundary */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* doesn't fit: return it for the next URB */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			/* copy failed: drop the frame and rewind past the
			 * command words already written for it
			 */
			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		/* GSO frames count as their segment count for statistics */
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
3876 
/* NAPI-context Tx bottom half: wake the stack queue if space has freed
 * up, then repeatedly fill free URB buffers from the pending queue and
 * submit them until pending data, free URBs, or luck runs out.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		/* hold a runtime-PM reference for the in-flight URB;
		 * released in tx_complete() or on submit failure below
		 */
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		/* the autopm-failure goto above lands inside this block so
		 * the filled-but-unsubmitted buffer is accounted and freed
		 */
		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
3969 
/* Main NAPI bottom half: deliver overflow frames from the previous cycle,
 * process completed Rx URBs (resubmitting each one), then top up the Rx
 * URB pool and run the Tx bottom half. Returns the number of frames
 * delivered, bounded by @budget.
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* no frame to deliver; buffer is just recycled */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4045 
/* NAPI poll callback. Delegates to lan78xx_bh() and, after completing
 * the poll, re-schedules itself when completed Rx URBs or pending Tx
 * data are still waiting, so no work is stranded between cycles.
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					/* wake the stack queue and poll again
					 * to pick up freshly queued frames
					 */
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	return result;
}
4089 
4090 static void lan78xx_delayedwork(struct work_struct *work)
4091 {
4092 	int status;
4093 	struct lan78xx_net *dev;
4094 
4095 	dev = container_of(work, struct lan78xx_net, wq.work);
4096 
4097 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4098 		return;
4099 
4100 	if (usb_autopm_get_interface(dev->intf) < 0)
4101 		return;
4102 
4103 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4104 		unlink_urbs(dev, &dev->txq);
4105 
4106 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4107 		if (status < 0 &&
4108 		    status != -EPIPE &&
4109 		    status != -ESHUTDOWN) {
4110 			if (netif_msg_tx_err(dev))
4111 				netdev_err(dev->net,
4112 					   "can't clear tx halt, status %d\n",
4113 					   status);
4114 		} else {
4115 			clear_bit(EVENT_TX_HALT, &dev->flags);
4116 			if (status != -ESHUTDOWN)
4117 				netif_wake_queue(dev->net);
4118 		}
4119 	}
4120 
4121 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4122 		unlink_urbs(dev, &dev->rxq);
4123 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4124 		if (status < 0 &&
4125 		    status != -EPIPE &&
4126 		    status != -ESHUTDOWN) {
4127 			if (netif_msg_rx_err(dev))
4128 				netdev_err(dev->net,
4129 					   "can't clear rx halt, status %d\n",
4130 					   status);
4131 		} else {
4132 			clear_bit(EVENT_RX_HALT, &dev->flags);
4133 			napi_schedule(&dev->napi);
4134 		}
4135 	}
4136 
4137 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4138 		int ret = 0;
4139 
4140 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4141 		if (lan78xx_link_reset(dev) < 0) {
4142 			netdev_info(dev->net, "link reset failed (%d)\n",
4143 				    ret);
4144 		}
4145 	}
4146 
4147 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4148 		lan78xx_update_stats(dev);
4149 
4150 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4151 
4152 		mod_timer(&dev->stat_monitor,
4153 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4154 
4155 		dev->delta = min((dev->delta * 2), 50);
4156 	}
4157 
4158 	usb_autopm_put_interface(dev->intf);
4159 }
4160 
/* Completion handler for the interrupt (status) endpoint URB.
 *
 * On success forwards the status payload to lan78xx_status(), then
 * resubmits the URB unless the interface is shutting down or the device
 * has disappeared.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* clear the buffer so stale status bits are never re-read */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4212 
/* USB disconnect handler: unwind everything lan78xx_probe() set up —
 * unregister the netdev, stop timers and deferred work, detach the PHY,
 * unbind, free the URB buffer pools and drop the udev reference.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	netif_napi_del(&dev->napi);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	/* flag disconnect first so the delayed work exits early, then
	 * stop the stat timer and the work itself
	 */
	timer_shutdown_sync(&dev->stat_monitor);
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	/* drop any Tx URBs parked while the device was suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4259 
/* ndo_tx_timeout handler: cancel all in-flight Tx URBs and schedule NAPI
 * so transmission restarts from the pending queue.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}
4267 
4268 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4269 						struct net_device *netdev,
4270 						netdev_features_t features)
4271 {
4272 	struct lan78xx_net *dev = netdev_priv(netdev);
4273 
4274 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4275 		features &= ~NETIF_F_GSO_MASK;
4276 
4277 	features = vlan_features_check(skb, features);
4278 	features = vxlan_features_check(skb, features);
4279 
4280 	return features;
4281 }
4282 
/* net_device_ops for the lan78xx netdev (installed in lan78xx_probe) */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4298 
/* Statistics timer callback: defer the actual (USB-register) statistics
 * read to process context via the delayed work.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4305 
/* USB probe: allocate the netdev and driver state, validate the three
 * expected endpoints (bulk-in, bulk-out, interrupt-in), bind to the
 * hardware, set up the status URB and PHY, and register the netdev.
 *
 * Returns 0 on success or a negative errno; the outN labels unwind the
 * acquisitions in reverse order.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->phy_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* need bulk-in, bulk-out and interrupt endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out5;
	}

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out6;
	} else {
		usb_fill_int_urb(dev->urb_intr, dev->udev,
				 dev->pipe_intr, buf, maxp,
				 intr_complete, dev, period);
		/* URB owns buf from here on (freed with the URB) */
		dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out6;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out7;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out8;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): return value of device_set_wakeup_enable() is
	 * assigned to ret but not checked; probe succeeds regardless.
	 */
	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out8:
	phy_disconnect(netdev->phydev);
out7:
	usb_free_urb(dev->urb_intr);
out6:
	kfree(buf);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4481 
/* Compute the bit-serial CRC-16 (polynomial 0x8005) over @len bytes of
 * @buf, starting from an accumulator of 0xFFFF and consuming each byte
 * LSB first.  The result is programmed into the WUF_CFG wakeup-frame
 * filter registers by lan78xx_set_suspend().
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 crc = 0xFFFF;
	int i, j;

	for (i = 0; i < len; i++) {
		u8 byte = buf[i];

		for (j = 0; j < 8; j++) {
			u16 top = crc >> 15;

			crc <<= 1;
			if (top ^ (u16)(byte & 1)) {
				crc ^= poly;
				crc |= (u16)0x0001U;
			}
			byte >>= 1;
		}
	}

	return crc;
}
4506 
/* Prepare the device for USB selective (auto) suspend.
 *
 * Stops both data paths, clears the wakeup control/status registers,
 * arms "good frame" wakeup with frame storage in WUCSR, selects suspend
 * mode 3 with PHY and WOL wake enabled in PMT_CTL, writes back the WUPS
 * status bits, and finally restarts the RX path.
 *
 * Returns 0 on success or a negative errno from a register access.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	/* clear any previously configured wake sources and latched events */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* writing the WUPS bits back appears to clear latched wakeup
	 * status (same pattern as the "clear WUPS" step in
	 * lan78xx_set_suspend()) - TODO confirm against datasheet
	 */
	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4574 
/* Program wakeup sources for a full (system) suspend.
 *
 * @wol is a mask of ethtool WAKE_* flags.  Both data paths are stopped,
 * all wakeup-frame filters are cleared, then a filter and/or enable bit
 * is installed in WUCSR / WUF_CFG / WUF_MASK for each requested wake
 * condition, the suspend mode is selected in PMT_CTL, latched WUPS
 * status is written back, and the RX path is restarted.
 *
 * Returns 0 on success or a negative errno from a register access.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* first bytes of the DA prefixes / EtherType that the
	 * wakeup-frame CRC filters are computed over
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* clear previously configured wake sources and latched events */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable every wakeup-frame filter before programming new ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		/* magic packet wakeup */
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		/* broadcast frame wakeup */
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask = 7: CRC covers the first 3 bytes (01:00:5E prefix) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask = 3: CRC covers the first 2 bytes (33:33 prefix) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		/* unicast (perfect DA filter) wakeup */
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask = 0x3000: CRC covers bytes 12-13 (the EtherType) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4771 
/* USB suspend callback, used for both runtime autosuspend and system sleep.
 *
 * If the interface is open: quiesce TX/RX, kill outstanding URBs, then arm
 * the chip either for autosuspend (lan78xx_set_auto_suspend()) or for the
 * user-configured WoL settings (lan78xx_set_suspend()).  If the interface
 * is down: disable all wakeup sources and enter suspend mode 3 directly.
 *
 * Returns 0 on success, -EBUSY to veto autosuspend while TX work is
 * pending, or a negative errno from a register access.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		/* txq.lock protects both txq and txq_pend here */
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* mark asleep before releasing the lock so the TX
			 * path sees the flag
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			/* arm the user-configured wake-on-LAN options */
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* write back WUPS bits to clear latched wakeup status */
		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4881 
4882 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4883 {
4884 	bool pipe_halted = false;
4885 	struct urb *urb;
4886 
4887 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4888 		struct sk_buff *skb = urb->context;
4889 		int ret;
4890 
4891 		if (!netif_device_present(dev->net) ||
4892 		    !netif_carrier_ok(dev->net) ||
4893 		    pipe_halted) {
4894 			lan78xx_release_tx_buf(dev, skb);
4895 			continue;
4896 		}
4897 
4898 		ret = usb_submit_urb(urb, GFP_ATOMIC);
4899 
4900 		if (ret == 0) {
4901 			netif_trans_update(dev->net);
4902 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4903 		} else {
4904 			if (ret == -EPIPE) {
4905 				netif_stop_queue(dev->net);
4906 				pipe_halted = true;
4907 			} else if (ret == -ENODEV) {
4908 				netif_device_detach(dev->net);
4909 			}
4910 
4911 			lan78xx_release_tx_buf(dev, skb);
4912 		}
4913 	}
4914 
4915 	return pipe_halted;
4916 }
4917 
/* USB resume callback, used for both runtime and system resume.
 *
 * If the interface was open: flush the TX FIFO, resubmit the interrupt
 * URB, resubmit any TX URBs deferred while asleep, restart the TX path,
 * kick NAPI and rearm the statistics timer.  In all cases the latched
 * wakeup events in WUCSR/WUCSR2/WK_SRC are cleared.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			/* inner ret deliberately shadows the outer one: a
			 * failed intr URB submission is logged but does not
			 * abort the resume
			 */
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		/* clear asleep under txq.lock so the TX path sees it */
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		napi_schedule(&dev->napi);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	/* disable wake sources and clear any latched wake events */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5014 
5015 static int lan78xx_reset_resume(struct usb_interface *intf)
5016 {
5017 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5018 	int ret;
5019 
5020 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5021 
5022 	ret = lan78xx_reset(dev);
5023 	if (ret < 0)
5024 		return ret;
5025 
5026 	phy_start(dev->net->phydev);
5027 
5028 	ret = lan78xx_resume(intf);
5029 
5030 	return ret;
5031 }
5032 
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
5053 
/* USB driver glue: probe/disconnect and power-management entry points.
 * Autosuspend is supported; hub-initiated LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);
5067 
/* Module metadata exported via modinfo */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
5071