// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))
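
/* Worked example (just the macro arithmetic, not a datasheet value):
 * FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS) rounds each byte count up
 * to 512-byte units, so 9216 bytes -> 18 units and 4096 bytes -> 8 units,
 * giving (18 << 0) | (8 << 8) = 0x0812 for the FCT_FLOW threshold fields.
 */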

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD	(true)
#define TX_ALIGNMENT			(4)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID	(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

#define TX_URB_NUM			10
#define TX_SS_URB_NUM			TX_URB_NUM
#define TX_HS_URB_NUM			TX_URB_NUM
#define TX_FS_URB_NUM			TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet */
#define TX_SS_URB_SIZE			(32 * 1024)
#define TX_HS_URB_SIZE			(16 * 1024)
#define TX_FS_URB_SIZE			(10 * 1024)

#define RX_SS_URB_NUM			30
#define RX_HS_URB_NUM			10
#define RX_FS_URB_NUM			10
#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
#define RX_FS_URB_SIZE			TX_FS_URB_SIZE

#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY		0x2000
#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY		0x2000
#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY		0x2000

#define TX_CMD_LEN			8
#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
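
/* Every TX skb is prefixed with an 8-byte command header (TX_CMD_LEN)
 * before it is handed to a URB buffer, so the largest TSO payload the
 * driver can advertise is the URB size minus that header and an Ethernet
 * header, which is what LAN78XX_TSO_SIZE() computes.
 */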

#define RX_CMD_LEN			10
#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (msec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};
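
/* skb->cb is 48 bytes on every architecture, so skb_data must stay small
 * enough to fit there: one urb pointer, one dev pointer, the state, a
 * length and a packet count.
 */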

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10
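
/* These EVENT_* values are bit numbers in lan78xx_net::flags; they are
 * set with set_bit() (see lan78xx_defer_kevent() below) and polled with
 * test_bit(), so several events can be pending at once.
 */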

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		phy_mutex; /* for phy access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};

/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	if (skb_queue_empty(buf_pool))
		return NULL;

	return skb_dequeue(buf_pool);
}

static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}

static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
{
	struct skb_data *entry;
	struct sk_buff *buf;

	while (!skb_queue_empty(buf_pool)) {
		buf = skb_dequeue(buf_pool);
		if (buf) {
			entry = (struct skb_data *)buf->cb;
			usb_free_urb(entry->urb);
			dev_kfree_skb_any(buf);
		}
	}
}

static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}
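
/* The RX and TX wrappers below draw buffers from these preallocated pools
 * rather than allocating per packet; pool depth and buffer size
 * (dev->n_rx_urbs, dev->rx_urb_size, ...) are set elsewhere from the
 * *_URB_NUM and *_URB_SIZE defines according to the negotiated USB speed.
 */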

static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}

static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}

static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	ret = lan78xx_write_reg(dev, reg, buf);
	if (ret < 0)
		return ret;

	return 0;
}
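
/* Illustrative read-modify-write usage (not a call the driver makes
 * verbatim): lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, 0) would
 * clear just the EEE enable bit while leaving the rest of MAC_CR intact.
 */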

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
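
/* The hardware statistics counters are 32-bit and wrap; a fresh reading
 * that is smaller than the previously saved one is taken as evidence of
 * a wrap, so rollover_count is bumped and later used to extend the
 * counter to 64 bits in lan78xx_update_stats().
 */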

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

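/* Extends each 32-bit hardware counter to 64 bits:
 * curr_stat[i] = raw[i] + rollover_count[i] * (rollover_max[i] + 1),
 * where rollover_max holds the per-counter wrap value.
 */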
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with timeout.
 * Called with phy_mutex held.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}
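
/* For example, mii_access(1, MII_BMSR, MII_READ) encodes "read register 1
 * (BMSR) of the PHY at address 1" plus the busy bit that kicks off the
 * transaction; lan78xx_phy_wait_not_busy() polls that busy bit until the
 * MAC has finished the MDIO cycle.
 */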

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

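/* The first OTP byte is an indicator: OTP_INDICATOR_1 (0xF3) selects the
 * image at offset 0, while OTP_INDICATOR_2 (0xF7) means the valid image
 * starts 0x100 bytes in, so the requested offset is rebased accordingly.
 */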
static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -EIO;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}
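
/* Each perfect-filter slot is a MAF_LO/MAF_HI register pair: MAF_LO gets
 * MAC bytes 0-3 (byte 0 in the least-significant octet) and MAF_HI gets
 * bytes 4-5 plus the valid and destination-type flags set above.
 */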

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
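
/* The nine CRC bits kept above select one of 512 hash-table bit
 * positions; lan78xx_set_multicast() sets bit (n % 32) of word (n / 32)
 * in mchash_table for every multicast address beyond the 32
 * perfect-filter slots.
 */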

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds the device's own MAC address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

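/* Resolve the pause configuration (autonegotiated via
 * mii_resolve_flowctrl_fdx() or forced from fc_request_control) and
 * program it; the 0xFFFF ORed into the FLOW value below is the pause-time
 * field, i.e. transmitted pause frames request the maximum pause quanta
 * (field position assumed, not taken from the datasheet).
 */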
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);

static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

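		/* USB 3.0 LPM tuning: at gigabit speed the U2 exit latency
		 * is apparently too costly, so only U1 is left enabled; at
		 * lower speeds both U1 and U2 are allowed (rationale
		 * inferred from the code below, not from the datasheet).
		 */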
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		local_bh_disable();
		napi_schedule(&dev->napi);
		local_bh_enable();
	}

	return 0;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq_safe(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
	else
		ret = -EINVAL;	/* don't report success for a rejected magic */

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	/* reject unsupported modes before taking the autopm reference so
	 * the error path cannot leak it
	 */
	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		goto exit;

	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		if (ret < 0)
			goto exit;

		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (ret < 0)
			goto exit;

		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (ret < 0)
			goto exit;

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
		if (ret < 0)
			goto exit;
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (ret < 0)
			goto exit;

		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (ret < 0)
			goto exit;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1743 
1744 static u32 lan78xx_get_link(struct net_device *net)
1745 {
1746 	u32 link;
1747 
1748 	mutex_lock(&net->phydev->lock);
1749 	phy_read_status(net->phydev);
1750 	link = net->phydev->link;
1751 	mutex_unlock(&net->phydev->lock);
1752 
1753 	return link;
1754 }
1755 
1756 static void lan78xx_get_drvinfo(struct net_device *net,
1757 				struct ethtool_drvinfo *info)
1758 {
1759 	struct lan78xx_net *dev = netdev_priv(net);
1760 
1761 	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1762 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1763 }
1764 
1765 static u32 lan78xx_get_msglevel(struct net_device *net)
1766 {
1767 	struct lan78xx_net *dev = netdev_priv(net);
1768 
1769 	return dev->msg_enable;
1770 }
1771 
1772 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1773 {
1774 	struct lan78xx_net *dev = netdev_priv(net);
1775 
1776 	dev->msg_enable = level;
1777 }
1778 
1779 static int lan78xx_get_link_ksettings(struct net_device *net,
1780 				      struct ethtool_link_ksettings *cmd)
1781 {
1782 	struct lan78xx_net *dev = netdev_priv(net);
1783 	struct phy_device *phydev = net->phydev;
1784 	int ret;
1785 
1786 	ret = usb_autopm_get_interface(dev->intf);
1787 	if (ret < 0)
1788 		return ret;
1789 
1790 	phy_ethtool_ksettings_get(phydev, cmd);
1791 
1792 	usb_autopm_put_interface(dev->intf);
1793 
1794 	return ret;
1795 }
1796 
1797 static int lan78xx_set_link_ksettings(struct net_device *net,
1798 				      const struct ethtool_link_ksettings *cmd)
1799 {
1800 	struct lan78xx_net *dev = netdev_priv(net);
1801 	struct phy_device *phydev = net->phydev;
1802 	int ret = 0;
1803 	int temp;
1804 
1805 	ret = usb_autopm_get_interface(dev->intf);
1806 	if (ret < 0)
1807 		return ret;
1808 
1809 	/* change speed & duplex */
1810 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1811 
1812 	if (!cmd->base.autoneg) {
1813 		/* force link down */
1814 		temp = phy_read(phydev, MII_BMCR);
1815 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1816 		mdelay(1);
1817 		phy_write(phydev, MII_BMCR, temp);
1818 	}
1819 
1820 	usb_autopm_put_interface(dev->intf);
1821 
1822 	return ret;
1823 }
1824 
1825 static void lan78xx_get_pause(struct net_device *net,
1826 			      struct ethtool_pauseparam *pause)
1827 {
1828 	struct lan78xx_net *dev = netdev_priv(net);
1829 	struct phy_device *phydev = net->phydev;
1830 	struct ethtool_link_ksettings ecmd;
1831 
1832 	phy_ethtool_ksettings_get(phydev, &ecmd);
1833 
1834 	pause->autoneg = dev->fc_autoneg;
1835 
1836 	if (dev->fc_request_control & FLOW_CTRL_TX)
1837 		pause->tx_pause = 1;
1838 
1839 	if (dev->fc_request_control & FLOW_CTRL_RX)
1840 		pause->rx_pause = 1;
1841 }
1842 
1843 static int lan78xx_set_pause(struct net_device *net,
1844 			     struct ethtool_pauseparam *pause)
1845 {
1846 	struct lan78xx_net *dev = netdev_priv(net);
1847 	struct phy_device *phydev = net->phydev;
1848 	struct ethtool_link_ksettings ecmd;
1849 	int ret;
1850 
1851 	phy_ethtool_ksettings_get(phydev, &ecmd);
1852 
1853 	if (pause->autoneg && !ecmd.base.autoneg) {
1854 		ret = -EINVAL;
1855 		goto exit;
1856 	}
1857 
1858 	dev->fc_request_control = 0;
1859 	if (pause->rx_pause)
1860 		dev->fc_request_control |= FLOW_CTRL_RX;
1861 
1862 	if (pause->tx_pause)
1863 		dev->fc_request_control |= FLOW_CTRL_TX;
1864 
1865 	if (ecmd.base.autoneg) {
1866 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
1867 		u32 mii_adv;
1868 
1869 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1870 				   ecmd.link_modes.advertising);
1871 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1872 				   ecmd.link_modes.advertising);
1873 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1874 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
1875 		linkmode_or(ecmd.link_modes.advertising, fc,
1876 			    ecmd.link_modes.advertising);
1877 
1878 		phy_ethtool_ksettings_set(phydev, &ecmd);
1879 	}
1880 
1881 	dev->fc_autoneg = pause->autoneg;
1882 
1883 	ret = 0;
1884 exit:
1885 	return ret;
1886 }
1887 
1888 static int lan78xx_get_regs_len(struct net_device *netdev)
1889 {
1890 	if (!netdev->phydev)
1891 		return (sizeof(lan78xx_regs));
1892 	else
1893 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1894 }
1895 
1896 static void
1897 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1898 		 void *buf)
1899 {
1900 	u32 *data = buf;
1901 	int i, j;
1902 	struct lan78xx_net *dev = netdev_priv(netdev);
1903 
1904 	/* Read Device/MAC registers */
1905 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1906 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1907 
1908 	if (!netdev->phydev)
1909 		return;
1910 
1911 	/* Read PHY registers */
1912 	for (j = 0; j < 32; i++, j++)
1913 		data[i] = phy_read(netdev->phydev, j);
1914 }
1915 
1916 static const struct ethtool_ops lan78xx_ethtool_ops = {
1917 	.get_link	= lan78xx_get_link,
1918 	.nway_reset	= phy_ethtool_nway_reset,
1919 	.get_drvinfo	= lan78xx_get_drvinfo,
1920 	.get_msglevel	= lan78xx_get_msglevel,
1921 	.set_msglevel	= lan78xx_set_msglevel,
1922 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1923 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1924 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1925 	.get_ethtool_stats = lan78xx_get_stats,
1926 	.get_sset_count = lan78xx_get_sset_count,
1927 	.get_strings	= lan78xx_get_strings,
1928 	.get_wol	= lan78xx_get_wol,
1929 	.set_wol	= lan78xx_set_wol,
1930 	.get_ts_info	= ethtool_op_get_ts_info,
1931 	.get_eee	= lan78xx_get_eee,
1932 	.set_eee	= lan78xx_set_eee,
1933 	.get_pauseparam	= lan78xx_get_pause,
1934 	.set_pauseparam	= lan78xx_set_pause,
1935 	.get_link_ksettings = lan78xx_get_link_ksettings,
1936 	.set_link_ksettings = lan78xx_set_link_ksettings,
1937 	.get_regs_len	= lan78xx_get_regs_len,
1938 	.get_regs	= lan78xx_get_regs,
1939 };
1940 
1941 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1942 {
1943 	u32 addr_lo, addr_hi;
1944 	u8 addr[6];
1945 
1946 	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1947 	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1948 
1949 	addr[0] = addr_lo & 0xFF;
1950 	addr[1] = (addr_lo >> 8) & 0xFF;
1951 	addr[2] = (addr_lo >> 16) & 0xFF;
1952 	addr[3] = (addr_lo >> 24) & 0xFF;
1953 	addr[4] = addr_hi & 0xFF;
1954 	addr[5] = (addr_hi >> 8) & 0xFF;
1955 
1956 	if (!is_valid_ether_addr(addr)) {
1957 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1958 			/* valid address present in Device Tree */
1959 			netif_dbg(dev, ifup, dev->net,
1960 				  "MAC address read from Device Tree");
1961 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1962 						 ETH_ALEN, addr) == 0) ||
1963 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1964 					      ETH_ALEN, addr) == 0)) &&
1965 			   is_valid_ether_addr(addr)) {
1966 			/* eeprom values are valid so use them */
1967 			netif_dbg(dev, ifup, dev->net,
1968 				  "MAC address read from EEPROM");
1969 		} else {
1970 			/* generate random MAC */
1971 			eth_random_addr(addr);
1972 			netif_dbg(dev, ifup, dev->net,
1973 				  "MAC address set to random addr");
1974 		}
1975 
1976 		addr_lo = addr[0] | (addr[1] << 8) |
1977 			  (addr[2] << 16) | (addr[3] << 24);
1978 		addr_hi = addr[4] | (addr[5] << 8);
1979 
1980 		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1981 		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1982 	}
1983 
1984 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1985 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1986 
1987 	eth_hw_addr_set(dev->net, addr);
1988 }
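
/* Example of the register packing done in lan78xx_init_mac_address():
 * the address 00:11:22:33:44:55 is stored little-endian as
 *   RX_ADDRL = 0x33221100
 *   RX_ADDRH = 0x00005544
 * and mirrored into address-filter slot 0 (MAF_LO(0)/MAF_HI(0)) with
 * MAF_HI_VALID_ set so the perfect filter accepts the new address.
 */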
1989 
1990 /* MDIO read and write wrappers for phylib */
1991 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1992 {
1993 	struct lan78xx_net *dev = bus->priv;
1994 	u32 val, addr;
1995 	int ret;
1996 
1997 	ret = usb_autopm_get_interface(dev->intf);
1998 	if (ret < 0)
1999 		return ret;
2000 
2001 	mutex_lock(&dev->phy_mutex);
2002 
2003 	/* confirm MII not busy */
2004 	ret = lan78xx_phy_wait_not_busy(dev);
2005 	if (ret < 0)
2006 		goto done;
2007 
2008 	/* set the address, index & direction (read from PHY) */
2009 	addr = mii_access(phy_id, idx, MII_READ);
2010 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2011 	if (ret < 0)
2012 		goto done;
2013 	ret = lan78xx_phy_wait_not_busy(dev);
2014 	if (ret < 0)
2015 		goto done;
2016 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2017 	if (ret < 0)
2018 		goto done;
2019 	ret = (int)(val & 0xFFFF);
2020 done:
2021 	mutex_unlock(&dev->phy_mutex);
2022 	usb_autopm_put_interface(dev->intf);
2023 
2024 	return ret;
2025 }
2026 
2027 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2028 				 u16 regval)
2029 {
2030 	struct lan78xx_net *dev = bus->priv;
2031 	u32 val, addr;
2032 	int ret;
2033 
2034 	ret = usb_autopm_get_interface(dev->intf);
2035 	if (ret < 0)
2036 		return ret;
2037 
2038 	mutex_lock(&dev->phy_mutex);
2039 
2040 	/* confirm MII not busy */
2041 	ret = lan78xx_phy_wait_not_busy(dev);
2042 	if (ret < 0)
2043 		goto done;
2044 
2045 	val = (u32)regval;
2046 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2047 	if (ret < 0)
2048 		goto done;
2049 	/* set the address, index & direction (write to PHY) */
2050 	addr = mii_access(phy_id, idx, MII_WRITE);
2051 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2052 	if (ret < 0)
2053 		goto done;
2054 
2055 	ret = lan78xx_phy_wait_not_busy(dev);
2056 done:
2057 	mutex_unlock(&dev->phy_mutex);
2058 	usb_autopm_put_interface(dev->intf);
2059 	return ret;
2060 }
2061 
2062 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2063 {
2064 	struct device_node *node;
2065 	int ret;
2066 
2067 	dev->mdiobus = mdiobus_alloc();
2068 	if (!dev->mdiobus) {
2069 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2070 		return -ENOMEM;
2071 	}
2072 
2073 	dev->mdiobus->priv = (void *)dev;
2074 	dev->mdiobus->read = lan78xx_mdiobus_read;
2075 	dev->mdiobus->write = lan78xx_mdiobus_write;
2076 	dev->mdiobus->name = "lan78xx-mdiobus";
2077 	dev->mdiobus->parent = &dev->udev->dev;
2078 
2079 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2080 		 dev->udev->bus->busnum, dev->udev->devnum);
2081 
2082 	switch (dev->chipid) {
2083 	case ID_REV_CHIP_ID_7800_:
2084 	case ID_REV_CHIP_ID_7850_:
2085 		/* set to internal PHY id */
2086 		dev->mdiobus->phy_mask = ~(1 << 1);
2087 		break;
2088 	case ID_REV_CHIP_ID_7801_:
2089 		/* scan through PHYAD[2:0] */
2090 		dev->mdiobus->phy_mask = ~(0xFF);
2091 		break;
2092 	}
2093 
2094 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2095 	ret = of_mdiobus_register(dev->mdiobus, node);
2096 	of_node_put(node);
2097 	if (ret) {
2098 		netdev_err(dev->net, "can't register MDIO bus\n");
2099 		goto exit1;
2100 	}
2101 
2102 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2103 	return 0;
2104 exit1:
2105 	mdiobus_free(dev->mdiobus);
2106 	return ret;
2107 }
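
/* phy_mask above is a bitmap of PHY addresses the MDIO core must skip
 * (a set bit suppresses probing): ~(1 << 1) restricts the scan to
 * address 1, where the internal PHY of the LAN7800/LAN7850 sits, while
 * ~(0xFF) leaves addresses 0-7 probeable so an external PHY can appear
 * wherever its PHYAD[2:0] straps place it on the LAN7801.
 */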
2108 
2109 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2110 {
2111 	mdiobus_unregister(dev->mdiobus);
2112 	mdiobus_free(dev->mdiobus);
2113 }
2114 
2115 static void lan78xx_link_status_change(struct net_device *net)
2116 {
2117 	struct phy_device *phydev = net->phydev;
2118 
2119 	phy_print_status(phydev);
2120 }
2121 
2122 static int irq_map(struct irq_domain *d, unsigned int irq,
2123 		   irq_hw_number_t hwirq)
2124 {
2125 	struct irq_domain_data *data = d->host_data;
2126 
2127 	irq_set_chip_data(irq, data);
2128 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2129 	irq_set_noprobe(irq);
2130 
2131 	return 0;
2132 }
2133 
2134 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2135 {
2136 	irq_set_chip_and_handler(irq, NULL, NULL);
2137 	irq_set_chip_data(irq, NULL);
2138 }
2139 
2140 static const struct irq_domain_ops chip_domain_ops = {
2141 	.map	= irq_map,
2142 	.unmap	= irq_unmap,
2143 };
2144 
2145 static void lan78xx_irq_mask(struct irq_data *irqd)
2146 {
2147 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2148 
2149 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2150 }
2151 
2152 static void lan78xx_irq_unmask(struct irq_data *irqd)
2153 {
2154 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2155 
2156 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2157 }
2158 
2159 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2160 {
2161 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2162 
2163 	mutex_lock(&data->irq_lock);
2164 }
2165 
2166 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2167 {
2168 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2169 	struct lan78xx_net *dev =
2170 			container_of(data, struct lan78xx_net, domain_data);
2171 	u32 buf;
2172 
2173 	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
2174 	 * are the only two callbacks executed in a non-atomic context.
2175 	 */
2176 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2177 	if (buf != data->irqenable)
2178 		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2179 
2180 	mutex_unlock(&data->irq_lock);
2181 }
2182 
2183 static struct irq_chip lan78xx_irqchip = {
2184 	.name			= "lan78xx-irqs",
2185 	.irq_mask		= lan78xx_irq_mask,
2186 	.irq_unmask		= lan78xx_irq_unmask,
2187 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2188 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2189 };
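
/* This irqchip follows the usual slow-bus pattern: irq_mask/irq_unmask
 * only edit the irqenable shadow word under irq_lock, and the USB write
 * of INT_EP_CTL - which can sleep - is deferred to irq_bus_sync_unlock().
 * A consumer thus pays at most one register write per bus_lock/
 * sync_unlock transaction, and never performs it from atomic context.
 */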
2190 
2191 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2192 {
2193 	struct device_node *of_node;
2194 	struct irq_domain *irqdomain;
2195 	unsigned int irqmap = 0;
2196 	u32 buf;
2197 	int ret = 0;
2198 
2199 	of_node = dev->udev->dev.parent->of_node;
2200 
2201 	mutex_init(&dev->domain_data.irq_lock);
2202 
2203 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2204 	dev->domain_data.irqenable = buf;
2205 
2206 	dev->domain_data.irqchip = &lan78xx_irqchip;
2207 	dev->domain_data.irq_handler = handle_simple_irq;
2208 
2209 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2210 					  &chip_domain_ops, &dev->domain_data);
2211 	if (irqdomain) {
2212 		/* create mapping for PHY interrupt */
2213 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2214 		if (!irqmap) {
2215 			irq_domain_remove(irqdomain);
2216 
2217 			irqdomain = NULL;
2218 			ret = -EINVAL;
2219 		}
2220 	} else {
2221 		ret = -EINVAL;
2222 	}
2223 
2224 	dev->domain_data.irqdomain = irqdomain;
2225 	dev->domain_data.phyirq = irqmap;
2226 
2227 	return ret;
2228 }
2229 
2230 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2231 {
2232 	if (dev->domain_data.phyirq > 0) {
2233 		irq_dispose_mapping(dev->domain_data.phyirq);
2234 
2235 		if (dev->domain_data.irqdomain)
2236 			irq_domain_remove(dev->domain_data.irqdomain);
2237 	}
2238 	dev->domain_data.phyirq = 0;
2239 	dev->domain_data.irqdomain = NULL;
2240 }
2241 
2242 static int lan8835_fixup(struct phy_device *phydev)
2243 {
2244 	int buf;
2245 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2246 
2247 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2248 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2249 	buf &= ~0x1800;
2250 	buf |= 0x0800;
2251 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2252 
2253 	/* RGMII MAC TXC Delay Enable */
2254 	lan78xx_write_reg(dev, MAC_RGMII_ID,
2255 			  MAC_RGMII_ID_TXC_DELAY_EN_);
2256 
2257 	/* RGMII TX DLL Tune Adjust */
2258 	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2259 
2260 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2261 
2262 	return 1;
2263 }
2264 
2265 static int ksz9031rnx_fixup(struct phy_device *phydev)
2266 {
2267 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2268 
2269 	/* Micrel KSZ9031RNX PHY configuration */
2270 	/* RGMII Control Signal Pad Skew */
2271 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2272 	/* RGMII RX Data Pad Skew */
2273 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2274 	/* RGMII RX Clock Pad Skew */
2275 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2276 
2277 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2278 
2279 	return 1;
2280 }
2281 
2282 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2283 {
2284 	u32 buf;
2285 	int ret;
2286 	struct fixed_phy_status fphy_status = {
2287 		.link = 1,
2288 		.speed = SPEED_1000,
2289 		.duplex = DUPLEX_FULL,
2290 	};
2291 	struct phy_device *phydev;
2292 
2293 	phydev = phy_find_first(dev->mdiobus);
2294 	if (!phydev) {
2295 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2296 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2297 		if (IS_ERR(phydev)) {
2298 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2299 			return NULL;
2300 		}
2301 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2302 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2303 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2304 					MAC_RGMII_ID_TXC_DELAY_EN_);
2305 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2306 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2307 		buf |= HW_CFG_CLK125_EN_;
2308 		buf |= HW_CFG_REFCLK25_EN_;
2309 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2310 	} else {
2311 		if (!phydev->drv) {
2312 			netdev_err(dev->net, "no PHY driver found\n");
2313 			return NULL;
2314 		}
2315 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2316 		/* external PHY fixup for KSZ9031RNX */
2317 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2318 						 ksz9031rnx_fixup);
2319 		if (ret < 0) {
2320 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2321 			return NULL;
2322 		}
2323 		/* external PHY fixup for LAN8835 */
2324 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2325 						 lan8835_fixup);
2326 		if (ret < 0) {
2327 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2328 			return NULL;
2329 		}
2330 		/* add more external PHY fixup here if needed */
2331 
2332 		phydev->is_internal = false;
2333 	}
2334 	return phydev;
2335 }
2336 
2337 static int lan78xx_phy_init(struct lan78xx_net *dev)
2338 {
2339 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2340 	int ret;
2341 	u32 mii_adv;
2342 	struct phy_device *phydev;
2343 
2344 	switch (dev->chipid) {
2345 	case ID_REV_CHIP_ID_7801_:
2346 		phydev = lan7801_phy_init(dev);
2347 		if (!phydev) {
2348 			netdev_err(dev->net, "lan7801: PHY Init Failed");
2349 			return -EIO;
2350 		}
2351 		break;
2352 
2353 	case ID_REV_CHIP_ID_7800_:
2354 	case ID_REV_CHIP_ID_7850_:
2355 		phydev = phy_find_first(dev->mdiobus);
2356 		if (!phydev) {
2357 			netdev_err(dev->net, "no PHY found\n");
2358 			return -EIO;
2359 		}
2360 		phydev->is_internal = true;
2361 		dev->interface = PHY_INTERFACE_MODE_GMII;
2362 		break;
2363 
2364 	default:
2365 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2366 		return -EIO;
2367 	}
2368 
2369 	/* if phyirq is not set, use polling mode in phylib */
2370 	if (dev->domain_data.phyirq > 0)
2371 		phydev->irq = dev->domain_data.phyirq;
2372 	else
2373 		phydev->irq = PHY_POLL;
2374 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2375 
2376 	/* set to AUTOMDIX */
2377 	phydev->mdix = ETH_TP_MDI_AUTO;
2378 
2379 	ret = phy_connect_direct(dev->net, phydev,
2380 				 lan78xx_link_status_change,
2381 				 dev->interface);
2382 	if (ret) {
2383 		netdev_err(dev->net, "can't attach PHY to %s\n",
2384 			   dev->mdiobus->id);
2385 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2386 			if (phy_is_pseudo_fixed_link(phydev)) {
2387 				fixed_phy_unregister(phydev);
2388 			} else {
2389 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2390 							     0xfffffff0);
2391 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2392 							     0xfffffff0);
2393 			}
2394 		}
2395 		return -EIO;
2396 	}
2397 
2398 	/* MAC doesn't support 1000T Half */
2399 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2400 
2401 	/* support both flow controls */
2402 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2403 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2404 			   phydev->advertising);
2405 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2406 			   phydev->advertising);
2407 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2408 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2409 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2410 
2411 	if (phydev->mdio.dev.of_node) {
2412 		u32 reg;
2413 		int len;
2414 
2415 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2416 						      "microchip,led-modes",
2417 						      sizeof(u32));
2418 		if (len >= 0) {
2419 			/* Ensure the appropriate LEDs are enabled */
2420 			lan78xx_read_reg(dev, HW_CFG, &reg);
2421 			reg &= ~(HW_CFG_LED0_EN_ |
2422 				 HW_CFG_LED1_EN_ |
2423 				 HW_CFG_LED2_EN_ |
2424 				 HW_CFG_LED3_EN_);
2425 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2426 				(len > 1) * HW_CFG_LED1_EN_ |
2427 				(len > 2) * HW_CFG_LED2_EN_ |
2428 				(len > 3) * HW_CFG_LED3_EN_;
2429 			lan78xx_write_reg(dev, HW_CFG, reg);
2430 		}
2431 	}
2432 
2433 	genphy_config_aneg(phydev);
2434 
2435 	dev->fc_autoneg = phydev->autoneg;
2436 
2437 	return 0;
2438 }
2439 
2440 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2441 {
2442 	u32 buf;
2443 	bool rxenabled;
2444 
2445 	lan78xx_read_reg(dev, MAC_RX, &buf);
2446 
2447 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2448 
2449 	if (rxenabled) {
2450 		buf &= ~MAC_RX_RXEN_;
2451 		lan78xx_write_reg(dev, MAC_RX, buf);
2452 	}
2453 
2454 	/* add 4 to size for FCS */
2455 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2456 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2457 
2458 	lan78xx_write_reg(dev, MAC_RX, buf);
2459 
2460 	if (rxenabled) {
2461 		buf |= MAC_RX_RXEN_;
2462 		lan78xx_write_reg(dev, MAC_RX, buf);
2463 	}
2464 
2465 	return 0;
2466 }
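
/* Worked example for lan78xx_set_rx_max_frame_length(): size = 1518
 * (a 1500-byte MTU plus 14-byte Ethernet header and 4-byte VLAN tag)
 * programs a limit of 1522 once the 4 FCS bytes are added. The receiver,
 * if running, is disabled across the update so no frame is checked
 * against a half-written limit.
 */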
2467 
2468 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2469 {
2470 	struct sk_buff *skb;
2471 	unsigned long flags;
2472 	int count = 0;
2473 
2474 	spin_lock_irqsave(&q->lock, flags);
2475 	while (!skb_queue_empty(q)) {
2476 		struct skb_data	*entry;
2477 		struct urb *urb;
2478 		int ret;
2479 
2480 		skb_queue_walk(q, skb) {
2481 			entry = (struct skb_data *)skb->cb;
2482 			if (entry->state != unlink_start)
2483 				goto found;
2484 		}
2485 		break;
2486 found:
2487 		entry->state = unlink_start;
2488 		urb = entry->urb;
2489 
2490 		/* Take a reference on the URB so it cannot be freed
2491 		 * while usb_unlink_urb() runs; otherwise the unlink
2492 		 * could trigger a use-after-free, since an async
2493 		 * unlink always races with the .complete handler
2494 		 * (including defer_bh).
2495 		 */
2496 		usb_get_urb(urb);
2497 		spin_unlock_irqrestore(&q->lock, flags);
2498 		/* during some PM-driven resume scenarios,
2499 		 * these (async) unlinks complete immediately
2500 		 */
2501 		ret = usb_unlink_urb(urb);
2502 		if (ret != -EINPROGRESS && ret != 0)
2503 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2504 		else
2505 			count++;
2506 		usb_put_urb(urb);
2507 		spin_lock_irqsave(&q->lock, flags);
2508 	}
2509 	spin_unlock_irqrestore(&q->lock, flags);
2510 	return count;
2511 }
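
/* unlink_urbs() drops the queue lock around usb_unlink_urb() because
 * completion handlers (defer_bh() and tx_complete()) take the same lock,
 * and an unlink can complete synchronously during PM-driven resume. The
 * walk restarts from the queue head on every pass since the list may
 * have changed while the lock was released; entries already marked
 * unlink_start are skipped so each URB is unlinked only once.
 */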
2512 
2513 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2514 {
2515 	struct lan78xx_net *dev = netdev_priv(netdev);
2516 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2517 	int ret;
2518 
2519 	/* no second zero-length packet read wanted after mtu-sized packets */
2520 	if ((max_frame_len % dev->maxpacket) == 0)
2521 		return -EDOM;
2522 
2523 	ret = usb_autopm_get_interface(dev->intf);
2524 	if (ret < 0)
2525 		return ret;
2526 
2527 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2528 	if (!ret)
2529 		netdev->mtu = new_mtu;
2530 
2531 	usb_autopm_put_interface(dev->intf);
2532 
2533 	return ret;
2534 }
2535 
2536 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2537 {
2538 	struct lan78xx_net *dev = netdev_priv(netdev);
2539 	struct sockaddr *addr = p;
2540 	u32 addr_lo, addr_hi;
2541 
2542 	if (netif_running(netdev))
2543 		return -EBUSY;
2544 
2545 	if (!is_valid_ether_addr(addr->sa_data))
2546 		return -EADDRNOTAVAIL;
2547 
2548 	eth_hw_addr_set(netdev, addr->sa_data);
2549 
2550 	addr_lo = netdev->dev_addr[0] |
2551 		  netdev->dev_addr[1] << 8 |
2552 		  netdev->dev_addr[2] << 16 |
2553 		  netdev->dev_addr[3] << 24;
2554 	addr_hi = netdev->dev_addr[4] |
2555 		  netdev->dev_addr[5] << 8;
2556 
2557 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2558 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2559 
2560 	/* Added to support MAC address changes */
2561 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2562 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2563 
2564 	return 0;
2565 }
2566 
2567 /* Enable or disable Rx checksum offload engine */
2568 static int lan78xx_set_features(struct net_device *netdev,
2569 				netdev_features_t features)
2570 {
2571 	struct lan78xx_net *dev = netdev_priv(netdev);
2572 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2573 	unsigned long flags;
2574 
2575 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2576 
2577 	if (features & NETIF_F_RXCSUM) {
2578 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2579 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2580 	} else {
2581 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2582 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2583 	}
2584 
2585 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2586 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2587 	else
2588 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2589 
2590 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2591 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2592 	else
2593 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2594 
2595 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2596 
2597 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2598 
2599 	return 0;
2600 }
2601 
2602 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2603 {
2604 	struct lan78xx_priv *pdata =
2605 			container_of(param, struct lan78xx_priv, set_vlan);
2606 	struct lan78xx_net *dev = pdata->dev;
2607 
2608 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2609 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2610 }
2611 
2612 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2613 				   __be16 proto, u16 vid)
2614 {
2615 	struct lan78xx_net *dev = netdev_priv(netdev);
2616 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2617 	u16 vid_bit_index;
2618 	u16 vid_dword_index;
2619 
2620 	vid_dword_index = (vid >> 5) & 0x7F;
2621 	vid_bit_index = vid & 0x1F;
2622 
2623 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2624 
2625 	/* defer register writes to a sleepable context */
2626 	schedule_work(&pdata->set_vlan);
2627 
2628 	return 0;
2629 }
2630 
2631 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2632 				    __be16 proto, u16 vid)
2633 {
2634 	struct lan78xx_net *dev = netdev_priv(netdev);
2635 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2636 	u16 vid_bit_index;
2637 	u16 vid_dword_index;
2638 
2639 	vid_dword_index = (vid >> 5) & 0x7F;
2640 	vid_bit_index = vid & 0x1F;
2641 
2642 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2643 
2644 	/* defer register writes to a sleepable context */
2645 	schedule_work(&pdata->set_vlan);
2646 
2647 	return 0;
2648 }
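
/* The VLAN filter kept in pdata->vlan_table is a 4096-bit map stored as
 * DP_SEL_VHF_VLAN_LEN u32 words (presumably 128, i.e. 4096 / 32) in the
 * dataport RAM; e.g. vid 1000 lives in word 1000 >> 5 = 31, bit
 * 1000 & 0x1F = 8.
 */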
2649 
2650 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2651 {
2652 	int ret;
2653 	u32 buf;
2654 	u32 regs[6] = { 0 };
2655 
2656 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2657 	if (buf & USB_CFG1_LTM_ENABLE_) {
2658 		u8 temp[2];
2659 		/* Get values from EEPROM first */
2660 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2661 			if (temp[0] == 24) {
2662 				ret = lan78xx_read_raw_eeprom(dev,
2663 							      temp[1] * 2,
2664 							      24,
2665 							      (u8 *)regs);
2666 				if (ret < 0)
2667 					return;
2668 			}
2669 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2670 			if (temp[0] == 24) {
2671 				ret = lan78xx_read_raw_otp(dev,
2672 							   temp[1] * 2,
2673 							   24,
2674 							   (u8 *)regs);
2675 				if (ret < 0)
2676 					return;
2677 			}
2678 		}
2679 	}
2680 
2681 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2682 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2683 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2684 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2685 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2686 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2687 }
2688 
2689 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2690 {
2691 	int result = 0;
2692 
2693 	switch (dev->udev->speed) {
2694 	case USB_SPEED_SUPER:
2695 		dev->rx_urb_size = RX_SS_URB_SIZE;
2696 		dev->tx_urb_size = TX_SS_URB_SIZE;
2697 		dev->n_rx_urbs = RX_SS_URB_NUM;
2698 		dev->n_tx_urbs = TX_SS_URB_NUM;
2699 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2700 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2701 		break;
2702 	case USB_SPEED_HIGH:
2703 		dev->rx_urb_size = RX_HS_URB_SIZE;
2704 		dev->tx_urb_size = TX_HS_URB_SIZE;
2705 		dev->n_rx_urbs = RX_HS_URB_NUM;
2706 		dev->n_tx_urbs = TX_HS_URB_NUM;
2707 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2708 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2709 		break;
2710 	case USB_SPEED_FULL:
2711 		dev->rx_urb_size = RX_FS_URB_SIZE;
2712 		dev->tx_urb_size = TX_FS_URB_SIZE;
2713 		dev->n_rx_urbs = RX_FS_URB_NUM;
2714 		dev->n_tx_urbs = TX_FS_URB_NUM;
2715 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2716 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2717 		break;
2718 	default:
2719 		netdev_warn(dev->net, "USB bus speed not supported\n");
2720 		result = -EIO;
2721 		break;
2722 	}
2723 
2724 	return result;
2725 }
2726 
2727 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2728 {
2729 	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
2730 }
2731 
2732 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2733 			   u32 hw_disabled)
2734 {
2735 	unsigned long timeout;
2736 	bool stopped = true;
2737 	int ret;
2738 	u32 buf;
2739 
2740 	/* Stop the h/w block (if not already stopped) */
2741 
2742 	ret = lan78xx_read_reg(dev, reg, &buf);
2743 	if (ret < 0)
2744 		return ret;
2745 
2746 	if (buf & hw_enabled) {
2747 		buf &= ~hw_enabled;
2748 
2749 		ret = lan78xx_write_reg(dev, reg, buf);
2750 		if (ret < 0)
2751 			return ret;
2752 
2753 		stopped = false;
2754 		timeout = jiffies + HW_DISABLE_TIMEOUT;
2755 		do {
2756 			ret = lan78xx_read_reg(dev, reg, &buf);
2757 			if (ret < 0)
2758 				return ret;
2759 
2760 			if (buf & hw_disabled)
2761 				stopped = true;
2762 			else
2763 				msleep(HW_DISABLE_DELAY_MS);
2764 		} while (!stopped && !time_after(jiffies, timeout));
2765 	}
2766 
2767 	ret = stopped ? 0 : -ETIME;
2768 
2769 	return ret;
2770 }
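
/* lan78xx_stop_hw() implements a stop handshake: clear the enable bit,
 * then poll for up to HW_DISABLE_TIMEOUT (sleeping HW_DISABLE_DELAY_MS
 * between reads) until the block raises its "disabled" status bit, e.g.
 * MAC_RX_RXEN_ paired with MAC_RX_RXD_ for the receiver.
 */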
2771 
2772 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2773 {
2774 	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
2775 }
2776 
2777 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2778 {
2779 	int ret;
2780 
2781 	netif_dbg(dev, drv, dev->net, "start tx path");
2782 
2783 	/* Start the MAC transmitter */
2784 
2785 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2786 	if (ret < 0)
2787 		return ret;
2788 
2789 	/* Start the Tx FIFO */
2790 
2791 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2792 	if (ret < 0)
2793 		return ret;
2794 
2795 	return 0;
2796 }
2797 
2798 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2799 {
2800 	int ret;
2801 
2802 	netif_dbg(dev, drv, dev->net, "stop tx path");
2803 
2804 	/* Stop the Tx FIFO */
2805 
2806 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2807 	if (ret < 0)
2808 		return ret;
2809 
2810 	/* Stop the MAC transmitter */
2811 
2812 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2813 	if (ret < 0)
2814 		return ret;
2815 
2816 	return 0;
2817 }
2818 
2819 /* The caller must ensure the Tx path is stopped before calling
2820  * lan78xx_flush_tx_fifo().
2821  */
2822 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2823 {
2824 	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
2825 }
2826 
2827 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2828 {
2829 	int ret;
2830 
2831 	netif_dbg(dev, drv, dev->net, "start rx path");
2832 
2833 	/* Start the Rx FIFO */
2834 
2835 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2836 	if (ret < 0)
2837 		return ret;
2838 
2839 	/* Start the MAC receiver */
2840 
2841 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2842 	if (ret < 0)
2843 		return ret;
2844 
2845 	return 0;
2846 }
2847 
2848 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2849 {
2850 	int ret;
2851 
2852 	netif_dbg(dev, drv, dev->net, "stop rx path");
2853 
2854 	/* Stop the MAC receiver */
2855 
2856 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2857 	if (ret < 0)
2858 		return ret;
2859 
2860 	/* Stop the Rx FIFO */
2861 
2862 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2863 	if (ret < 0)
2864 		return ret;
2865 
2866 	return 0;
2867 }
2868 
2869 /* The caller must ensure the Rx path is stopped before calling
2870  * lan78xx_flush_rx_fifo().
2871  */
2872 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2873 {
2874 	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
2875 }
2876 
2877 static int lan78xx_reset(struct lan78xx_net *dev)
2878 {
2879 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2880 	unsigned long timeout;
2881 	int ret;
2882 	u32 buf;
2883 	u8 sig;
2884 
2885 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2886 	if (ret < 0)
2887 		return ret;
2888 
2889 	buf |= HW_CFG_LRST_;
2890 
2891 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2892 	if (ret < 0)
2893 		return ret;
2894 
2895 	timeout = jiffies + HZ;
2896 	do {
2897 		mdelay(1);
2898 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2899 		if (ret < 0)
2900 			return ret;
2901 
2902 		if (time_after(jiffies, timeout)) {
2903 			netdev_warn(dev->net,
2904 				    "timeout on completion of LiteReset");
2905 			ret = -ETIMEDOUT;
2906 			return ret;
2907 		}
2908 	} while (buf & HW_CFG_LRST_);
2909 
2910 	lan78xx_init_mac_address(dev);
2911 
2912 	/* save the device ID for later use */
2913 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2914 	if (ret < 0)
2915 		return ret;
2916 
2917 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2918 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2919 
2920 	/* Respond to the IN token with a NAK */
2921 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2922 	if (ret < 0)
2923 		return ret;
2924 
2925 	buf |= USB_CFG_BIR_;
2926 
2927 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2928 	if (ret < 0)
2929 		return ret;
2930 
2931 	/* Init LTM */
2932 	lan78xx_init_ltm(dev);
2933 
2934 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
2935 	if (ret < 0)
2936 		return ret;
2937 
2938 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
2939 	if (ret < 0)
2940 		return ret;
2941 
2942 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2943 	if (ret < 0)
2944 		return ret;
2945 
2946 	buf |= HW_CFG_MEF_;
2947 
2948 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2949 	if (ret < 0)
2950 		return ret;
2951 
2952 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2953 	if (ret < 0)
2954 		return ret;
2955 
2956 	buf |= USB_CFG_BCE_;
2957 
2958 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2959 	if (ret < 0)
2960 		return ret;
2961 
2962 	/* set FIFO sizes */
2963 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2964 
2965 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2966 	if (ret < 0)
2967 		return ret;
2968 
2969 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2970 
2971 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2972 	if (ret < 0)
2973 		return ret;
2974 
2975 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2976 	if (ret < 0)
2977 		return ret;
2978 
2979 	ret = lan78xx_write_reg(dev, FLOW, 0);
2980 	if (ret < 0)
2981 		return ret;
2982 
2983 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2984 	if (ret < 0)
2985 		return ret;
2986 
2987 	/* Don't need rfe_ctl_lock during initialisation */
2988 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2989 	if (ret < 0)
2990 		return ret;
2991 
2992 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2993 
2994 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2995 	if (ret < 0)
2996 		return ret;
2997 
2998 	/* Enable or disable checksum offload engines */
2999 	ret = lan78xx_set_features(dev->net, dev->net->features);
3000 	if (ret < 0)
3001 		return ret;
3002 
3003 	lan78xx_set_multicast(dev->net);
3004 
3005 	/* reset PHY */
3006 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3007 	if (ret < 0)
3008 		return ret;
3009 
3010 	buf |= PMT_CTL_PHY_RST_;
3011 
3012 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3013 	if (ret < 0)
3014 		return ret;
3015 
3016 	timeout = jiffies + HZ;
3017 	do {
3018 		mdelay(1);
3019 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3020 		if (ret < 0)
3021 			return ret;
3022 
3023 		if (time_after(jiffies, timeout)) {
3024 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3025 			ret = -ETIMEDOUT;
3026 			return ret;
3027 		}
3028 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3029 
3030 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3031 	if (ret < 0)
3032 		return ret;
3033 
3034 	/* LAN7801 only has RGMII mode */
3035 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
3036 		buf &= ~MAC_CR_GMII_EN_;
3037 
3038 	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
3039 	    dev->chipid == ID_REV_CHIP_ID_7850_) {
3040 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
3041 		if (!ret && sig != EEPROM_INDICATOR) {
3042 			/* Implies there is no external eeprom. Set mac speed */
3043 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
3044 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3045 		}
3046 	}
3047 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3048 	if (ret < 0)
3049 		return ret;
3050 
3051 	ret = lan78xx_set_rx_max_frame_length(dev,
3052 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3053 
3054 	return ret;
3055 }
3056 
3057 static void lan78xx_init_stats(struct lan78xx_net *dev)
3058 {
3059 	u32 *p;
3060 	int i;
3061 
3062 	/* initialize the counter rollover limits for the stats update:
3063 	 * some counters are 20 bits wide and some are 32 bits
3064 	 */
3065 	p = (u32 *)&dev->stats.rollover_max;
3066 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3067 		p[i] = 0xFFFFF;
3068 
3069 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3070 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3071 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3072 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3073 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3074 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3075 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3076 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3077 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3078 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3079 
3080 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3081 }
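
/* rollover_max records each hardware counter's wrap point (0xFFFFF for
 * the 20-bit event counters, 0xFFFFFFFF for the byte, LPI-time and
 * LPI-transition counters); the stats update path can then presumably
 * detect a wrap when a raw reading drops below the previous one and
 * credit rollover_max + 1 to the accumulated total.
 */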
3082 
3083 static int lan78xx_open(struct net_device *net)
3084 {
3085 	struct lan78xx_net *dev = netdev_priv(net);
3086 	int ret;
3087 
3088 	netif_dbg(dev, ifup, dev->net, "open device");
3089 
3090 	ret = usb_autopm_get_interface(dev->intf);
3091 	if (ret < 0)
3092 		return ret;
3093 
3094 	mutex_lock(&dev->dev_mutex);
3095 
3096 	phy_start(net->phydev);
3097 
3098 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
3099 
3100 	/* for Link Check */
3101 	if (dev->urb_intr) {
3102 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3103 		if (ret < 0) {
3104 			netif_err(dev, ifup, dev->net,
3105 				  "intr submit %d\n", ret);
3106 			goto done;
3107 		}
3108 	}
3109 
3110 	ret = lan78xx_flush_rx_fifo(dev);
3111 	if (ret < 0)
3112 		goto done;
3113 	ret = lan78xx_flush_tx_fifo(dev);
3114 	if (ret < 0)
3115 		goto done;
3116 
3117 	ret = lan78xx_start_tx_path(dev);
3118 	if (ret < 0)
3119 		goto done;
3120 	ret = lan78xx_start_rx_path(dev);
3121 	if (ret < 0)
3122 		goto done;
3123 
3124 	lan78xx_init_stats(dev);
3125 
3126 	set_bit(EVENT_DEV_OPEN, &dev->flags);
3127 
3128 	netif_start_queue(net);
3129 
3130 	dev->link_on = false;
3131 
3132 	napi_enable(&dev->napi);
3133 
3134 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
3135 done:
3136 	mutex_unlock(&dev->dev_mutex);
3137 
3138 	if (ret < 0)
3139 		usb_autopm_put_interface(dev->intf);
3140 
3141 	return ret;
3142 }
3143 
3144 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3145 {
3146 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3147 	DECLARE_WAITQUEUE(wait, current);
3148 	int temp;
3149 
3150 	/* ensure there are no more active urbs */
3151 	add_wait_queue(&unlink_wakeup, &wait);
3152 	set_current_state(TASK_UNINTERRUPTIBLE);
3153 	dev->wait = &unlink_wakeup;
3154 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3155 
3156 	/* maybe wait for deletions to finish. */
3157 	while (!skb_queue_empty(&dev->rxq) ||
3158 	       !skb_queue_empty(&dev->txq)) {
3159 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3160 		set_current_state(TASK_UNINTERRUPTIBLE);
3161 		netif_dbg(dev, ifdown, dev->net,
3162 			  "waited for %d urb completions", temp);
3163 	}
3164 	set_current_state(TASK_RUNNING);
3165 	dev->wait = NULL;
3166 	remove_wait_queue(&unlink_wakeup, &wait);
3167 
3168 	/* empty Rx done, Rx overflow and Tx pend queues
3169 	 */
3170 	while (!skb_queue_empty(&dev->rxq_done)) {
3171 		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3172 
3173 		lan78xx_release_rx_buf(dev, skb);
3174 	}
3175 
3176 	skb_queue_purge(&dev->rxq_overflow);
3177 	skb_queue_purge(&dev->txq_pend);
3178 }
3179 
3180 static int lan78xx_stop(struct net_device *net)
3181 {
3182 	struct lan78xx_net *dev = netdev_priv(net);
3183 
3184 	netif_dbg(dev, ifup, dev->net, "stop device");
3185 
3186 	mutex_lock(&dev->dev_mutex);
3187 
3188 	if (timer_pending(&dev->stat_monitor))
3189 		del_timer_sync(&dev->stat_monitor);
3190 
3191 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3192 	netif_stop_queue(net);
3193 	napi_disable(&dev->napi);
3194 
3195 	lan78xx_terminate_urbs(dev);
3196 
3197 	netif_info(dev, ifdown, dev->net,
3198 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3199 		   net->stats.rx_packets, net->stats.tx_packets,
3200 		   net->stats.rx_errors, net->stats.tx_errors);
3201 
3202 	/* ignore errors that occur stopping the Tx and Rx data paths */
3203 	lan78xx_stop_tx_path(dev);
3204 	lan78xx_stop_rx_path(dev);
3205 
3206 	if (net->phydev)
3207 		phy_stop(net->phydev);
3208 
3209 	usb_kill_urb(dev->urb_intr);
3210 
3211 	/* Deferred work (task, timer, softirq) must also stop; we can't
3212 	 * flush_scheduled_work() until we drop rtnl (later), else workers
3213 	 * could deadlock, so make the workers a NOP instead.
3214 	 */
3215 	clear_bit(EVENT_TX_HALT, &dev->flags);
3216 	clear_bit(EVENT_RX_HALT, &dev->flags);
3217 	clear_bit(EVENT_LINK_RESET, &dev->flags);
3218 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3219 
3220 	cancel_delayed_work_sync(&dev->wq);
3221 
3222 	usb_autopm_put_interface(dev->intf);
3223 
3224 	mutex_unlock(&dev->dev_mutex);
3225 
3226 	return 0;
3227 }
3228 
3229 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3230 			       struct sk_buff_head *list, enum skb_state state)
3231 {
3232 	unsigned long flags;
3233 	enum skb_state old_state;
3234 	struct skb_data *entry = (struct skb_data *)skb->cb;
3235 
3236 	spin_lock_irqsave(&list->lock, flags);
3237 	old_state = entry->state;
3238 	entry->state = state;
3239 
3240 	__skb_unlink(skb, list);
3241 	spin_unlock(&list->lock);
3242 	spin_lock(&dev->rxq_done.lock);
3243 
3244 	__skb_queue_tail(&dev->rxq_done, skb);
3245 	if (skb_queue_len(&dev->rxq_done) == 1)
3246 		napi_schedule(&dev->napi);
3247 
3248 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3249 
3250 	return old_state;
3251 }
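
/* defer_bh() schedules NAPI only when rxq_done goes from empty to one
 * entry: completions appended while a poll is already pending will be
 * drained by that poll, so further napi_schedule() calls would be
 * redundant.
 */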
3252 
3253 static void tx_complete(struct urb *urb)
3254 {
3255 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3256 	struct skb_data *entry = (struct skb_data *)skb->cb;
3257 	struct lan78xx_net *dev = entry->dev;
3258 
3259 	if (urb->status == 0) {
3260 		dev->net->stats.tx_packets += entry->num_of_packet;
3261 		dev->net->stats.tx_bytes += entry->length;
3262 	} else {
3263 		dev->net->stats.tx_errors += entry->num_of_packet;
3264 
3265 		switch (urb->status) {
3266 		case -EPIPE:
3267 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3268 			break;
3269 
3270 		/* software-driven interface shutdown */
3271 		case -ECONNRESET:
3272 		case -ESHUTDOWN:
3273 			netif_dbg(dev, tx_err, dev->net,
3274 				  "tx err interface gone %d\n",
3275 				  entry->urb->status);
3276 			break;
3277 
3278 		case -EPROTO:
3279 		case -ETIME:
3280 		case -EILSEQ:
3281 			netif_stop_queue(dev->net);
3282 			netif_dbg(dev, tx_err, dev->net,
3283 				  "tx err queue stopped %d\n",
3284 				  entry->urb->status);
3285 			break;
3286 		default:
3287 			netif_dbg(dev, tx_err, dev->net,
3288 				  "unknown tx err %d\n",
3289 				  entry->urb->status);
3290 			break;
3291 		}
3292 	}
3293 
3294 	usb_autopm_put_interface_async(dev->intf);
3295 
3296 	skb_unlink(skb, &dev->txq);
3297 
3298 	lan78xx_release_tx_buf(dev, skb);
3299 
3300 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3301 	 */
3302 	if (skb_queue_empty(&dev->txq) &&
3303 	    !skb_queue_empty(&dev->txq_pend))
3304 		napi_schedule(&dev->napi);
3305 }
3306 
3307 static void lan78xx_queue_skb(struct sk_buff_head *list,
3308 			      struct sk_buff *newsk, enum skb_state state)
3309 {
3310 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3311 
3312 	__skb_queue_tail(list, newsk);
3313 	entry->state = state;
3314 }
3315 
3316 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3317 {
3318 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3319 }
3320 
3321 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3322 {
3323 	return dev->tx_pend_data_len;
3324 }
3325 
3326 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3327 				    struct sk_buff *skb,
3328 				    unsigned int *tx_pend_data_len)
3329 {
3330 	unsigned long flags;
3331 
3332 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3333 
3334 	__skb_queue_tail(&dev->txq_pend, skb);
3335 
3336 	dev->tx_pend_data_len += skb->len;
3337 	*tx_pend_data_len = dev->tx_pend_data_len;
3338 
3339 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3340 }
3341 
3342 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3343 					 struct sk_buff *skb,
3344 					 unsigned int *tx_pend_data_len)
3345 {
3346 	unsigned long flags;
3347 
3348 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3349 
3350 	__skb_queue_head(&dev->txq_pend, skb);
3351 
3352 	dev->tx_pend_data_len += skb->len;
3353 	*tx_pend_data_len = dev->tx_pend_data_len;
3354 
3355 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3356 }
3357 
3358 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3359 				    struct sk_buff **skb,
3360 				    unsigned int *tx_pend_data_len)
3361 {
3362 	unsigned long flags;
3363 
3364 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3365 
3366 	*skb = __skb_dequeue(&dev->txq_pend);
3367 	if (*skb)
3368 		dev->tx_pend_data_len -= (*skb)->len;
3369 	*tx_pend_data_len = dev->tx_pend_data_len;
3370 
3371 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3372 }
3373 
3374 static netdev_tx_t
3375 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3376 {
3377 	struct lan78xx_net *dev = netdev_priv(net);
3378 	unsigned int tx_pend_data_len;
3379 
3380 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3381 		schedule_delayed_work(&dev->wq, 0);
3382 
3383 	skb_tx_timestamp(skb);
3384 
3385 	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3386 
3387 	/* Set up a Tx URB if none is in progress */
3388 
3389 	if (skb_queue_empty(&dev->txq))
3390 		napi_schedule(&dev->napi);
3391 
3392 	/* Stop stack Tx queue if we have enough data to fill
3393 	 * all the free Tx URBs.
3394 	 */
3395 	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3396 		netif_stop_queue(net);
3397 
3398 		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3399 			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3400 
3401 		/* Kick off transmission of pending data */
3402 
3403 		if (!skb_queue_empty(&dev->txq_free))
3404 			napi_schedule(&dev->napi);
3405 	}
3406 
3407 	return NETDEV_TX_OK;
3408 }
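
/* Tx flow control in lan78xx_start_xmit(): every skb is parked on
 * txq_pend first and NAPI drains that queue into URBs, so the stack
 * queue is stopped as soon as the pending bytes exceed what the free
 * URBs can absorb (lan78xx_tx_urb_space() = free URBs * tx_urb_size).
 * lan78xx_tx_bh() wakes the queue again once URB space is released.
 */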
3409 
3410 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3411 {
3412 	struct lan78xx_priv *pdata = NULL;
3413 	int ret;
3414 	int i;
3415 
3416 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3417 
3418 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3419 	if (!pdata) {
3420 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3421 		return -ENOMEM;
3422 	}
3423 
3424 	pdata->dev = dev;
3425 
3426 	spin_lock_init(&pdata->rfe_ctl_lock);
3427 	mutex_init(&pdata->dataport_mutex);
3428 
3429 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3430 
3431 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3432 		pdata->vlan_table[i] = 0;
3433 
3434 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3435 
3436 	dev->net->features = 0;
3437 
3438 	if (DEFAULT_TX_CSUM_ENABLE)
3439 		dev->net->features |= NETIF_F_HW_CSUM;
3440 
3441 	if (DEFAULT_RX_CSUM_ENABLE)
3442 		dev->net->features |= NETIF_F_RXCSUM;
3443 
3444 	if (DEFAULT_TSO_CSUM_ENABLE)
3445 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3446 
3447 	if (DEFAULT_VLAN_RX_OFFLOAD)
3448 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3449 
3450 	if (DEFAULT_VLAN_FILTER_ENABLE)
3451 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3452 
3453 	dev->net->hw_features = dev->net->features;
3454 
3455 	ret = lan78xx_setup_irq_domain(dev);
3456 	if (ret < 0) {
3457 		netdev_warn(dev->net,
3458 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3459 		goto out1;
3460 	}
3461 
3462 	/* Init all registers */
3463 	ret = lan78xx_reset(dev);
3464 	if (ret) {
3465 		netdev_warn(dev->net, "Registers INIT FAILED....");
3466 		goto out2;
3467 	}
3468 
3469 	ret = lan78xx_mdio_init(dev);
3470 	if (ret) {
3471 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3472 		goto out2;
3473 	}
3474 
3475 	dev->net->flags |= IFF_MULTICAST;
3476 
3477 	pdata->wol = WAKE_MAGIC;
3478 
3479 	return ret;
3480 
3481 out2:
3482 	lan78xx_remove_irq_domain(dev);
3483 
3484 out1:
3485 	netdev_warn(dev->net, "Bind routine FAILED");
3486 	cancel_work_sync(&pdata->set_multicast);
3487 	cancel_work_sync(&pdata->set_vlan);
3488 	kfree(pdata);
3489 	return ret;
3490 }
3491 
3492 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3493 {
3494 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3495 
3496 	lan78xx_remove_irq_domain(dev);
3497 
3498 	lan78xx_remove_mdio(dev);
3499 
3500 	if (pdata) {
3501 		cancel_work_sync(&pdata->set_multicast);
3502 		cancel_work_sync(&pdata->set_vlan);
3503 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3504 		kfree(pdata);
3505 		pdata = NULL;
3506 		dev->data[0] = 0;
3507 	}
3508 }
3509 
3510 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3511 				    struct sk_buff *skb,
3512 				    u32 rx_cmd_a, u32 rx_cmd_b)
3513 {
3514 	/* HW Checksum offload appears to be flawed if used when not stripping
3515 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3516 	 */
3517 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3518 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3519 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3520 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3521 		skb->ip_summed = CHECKSUM_NONE;
3522 	} else {
3523 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3524 		skb->ip_summed = CHECKSUM_COMPLETE;
3525 	}
3526 }
3527 
3528 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3529 				    struct sk_buff *skb,
3530 				    u32 rx_cmd_a, u32 rx_cmd_b)
3531 {
3532 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3533 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3534 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3535 				       (rx_cmd_b & 0xffff));
3536 }
3537 
3538 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3539 {
3540 	dev->net->stats.rx_packets++;
3541 	dev->net->stats.rx_bytes += skb->len;
3542 
3543 	skb->protocol = eth_type_trans(skb, dev->net);
3544 
3545 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3546 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3547 	memset(skb->cb, 0, sizeof(struct skb_data));
3548 
3549 	if (skb_defer_rx_timestamp(skb))
3550 		return;
3551 
3552 	napi_gro_receive(&dev->napi, skb);
3553 }
3554 
3555 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3556 		      int budget, int *work_done)
3557 {
3558 	if (skb->len < RX_SKB_MIN_LEN)
3559 		return 0;
3560 
3561 	/* Extract frames from the URB buffer and pass each one to
3562 	 * the stack in a new NAPI SKB.
3563 	 */
3564 	while (skb->len > 0) {
3565 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3566 		u16 rx_cmd_c;
3567 		unsigned char *packet;
3568 
3569 		rx_cmd_a = get_unaligned_le32(skb->data);
3570 		skb_pull(skb, sizeof(rx_cmd_a));
3571 
3572 		rx_cmd_b = get_unaligned_le32(skb->data);
3573 		skb_pull(skb, sizeof(rx_cmd_b));
3574 
3575 		rx_cmd_c = get_unaligned_le16(skb->data);
3576 		skb_pull(skb, sizeof(rx_cmd_c));
3577 
3578 		packet = skb->data;
3579 
3580 		/* get the packet length */
3581 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3582 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3583 
3584 		if (unlikely(size > skb->len)) {
3585 			netif_dbg(dev, rx_err, dev->net,
3586 				  "size err rx_cmd_a=0x%08x\n",
3587 				  rx_cmd_a);
3588 			return 0;
3589 		}
3590 
3591 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3592 			netif_dbg(dev, rx_err, dev->net,
3593 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3594 		} else {
3595 			u32 frame_len;
3596 			struct sk_buff *skb2;
3597 
3598 			if (unlikely(size < ETH_FCS_LEN)) {
3599 				netif_dbg(dev, rx_err, dev->net,
3600 					  "size err rx_cmd_a=0x%08x\n",
3601 					  rx_cmd_a);
3602 				return 0;
3603 			}
3604 
3605 			frame_len = size - ETH_FCS_LEN;
3606 
3607 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3608 			if (!skb2)
3609 				return 0;
3610 
3611 			memcpy(skb2->data, packet, frame_len);
3612 
3613 			skb_put(skb2, frame_len);
3614 
3615 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3616 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3617 
3618 			/* Processing of the URB buffer must complete once
3619 			 * it has started. If the NAPI work budget is exhausted
3620 			 * while frames remain they are added to the overflow
3621 			 * queue for delivery in the next NAPI polling cycle.
3622 			 */
3623 			if (*work_done < budget) {
3624 				lan78xx_skb_return(dev, skb2);
3625 				++(*work_done);
3626 			} else {
3627 				skb_queue_tail(&dev->rxq_overflow, skb2);
3628 			}
3629 		}
3630 
3631 		skb_pull(skb, size);
3632 
3633 		/* skip padding bytes before the next frame starts */
3634 		if (skb->len)
3635 			skb_pull(skb, align_count);
3636 	}
3637 
3638 	return 1;
3639 }
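
/* URB buffer layout parsed by lan78xx_rx(): each frame is preceded by a
 * 10-byte command header (rx_cmd_a, rx_cmd_b, rx_cmd_c) and followed by
 * padding that realigns the next header to 4 bytes, allowing for the
 * RXW_PADDING (2) byte offset; e.g. a 60-byte frame is followed by
 * align_count = (4 - ((60 + 2) % 4)) % 4 = 2 pad bytes.
 */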
3640 
3641 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3642 			      int budget, int *work_done)
3643 {
3644 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3645 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3646 		dev->net->stats.rx_errors++;
3647 	}
3648 }
3649 
3650 static void rx_complete(struct urb *urb)
3651 {
3652 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3653 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3654 	struct lan78xx_net *dev = entry->dev;
3655 	int urb_status = urb->status;
3656 	enum skb_state state;
3657 
3658 	netif_dbg(dev, rx_status, dev->net,
3659 		  "rx done: status %d", urb->status);
3660 
3661 	skb_put(skb, urb->actual_length);
3662 	state = rx_done;
3663 
3664 	if (urb != entry->urb)
3665 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
3666 
3667 	switch (urb_status) {
3668 	case 0:
3669 		if (skb->len < RX_SKB_MIN_LEN) {
3670 			state = rx_cleanup;
3671 			dev->net->stats.rx_errors++;
3672 			dev->net->stats.rx_length_errors++;
3673 			netif_dbg(dev, rx_err, dev->net,
3674 				  "rx length %d\n", skb->len);
3675 		}
3676 		usb_mark_last_busy(dev->udev);
3677 		break;
3678 	case -EPIPE:
3679 		dev->net->stats.rx_errors++;
3680 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3681 		fallthrough;
3682 	case -ECONNRESET:				/* async unlink */
3683 	case -ESHUTDOWN:				/* hardware gone */
3684 		netif_dbg(dev, ifdown, dev->net,
3685 			  "rx shutdown, code %d\n", urb_status);
3686 		state = rx_cleanup;
3687 		break;
3688 	case -EPROTO:
3689 	case -ETIME:
3690 	case -EILSEQ:
3691 		dev->net->stats.rx_errors++;
3692 		state = rx_cleanup;
3693 		break;
3694 
3695 	/* data overrun ... flush fifo? */
3696 	case -EOVERFLOW:
3697 		dev->net->stats.rx_over_errors++;
3698 		fallthrough;
3699 
3700 	default:
3701 		state = rx_cleanup;
3702 		dev->net->stats.rx_errors++;
3703 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3704 		break;
3705 	}
3706 
3707 	state = defer_bh(dev, skb, &dev->rxq, state);
3708 }
3709 
3710 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
3711 {
3712 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3713 	size_t size = dev->rx_urb_size;
3714 	struct urb *urb = entry->urb;
3715 	unsigned long lockflags;
3716 	int ret = 0;
3717 
3718 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3719 			  skb->data, size, rx_complete, skb);
3720 
3721 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3722 
3723 	if (netif_device_present(dev->net) &&
3724 	    netif_running(dev->net) &&
3725 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3726 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3727 		ret = usb_submit_urb(urb, flags);
3728 		switch (ret) {
3729 		case 0:
3730 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3731 			break;
3732 		case -EPIPE:
3733 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3734 			break;
3735 		case -ENODEV:
3736 		case -ENOENT:
3737 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3738 			netif_device_detach(dev->net);
3739 			break;
3740 		case -EHOSTUNREACH:
3741 			ret = -ENOLINK;
3742 			napi_schedule(&dev->napi);
3743 			break;
3744 		default:
3745 			netif_dbg(dev, rx_err, dev->net,
3746 				  "rx submit, %d\n", ret);
3747 			napi_schedule(&dev->napi);
3748 			break;
3749 		}
3750 	} else {
3751 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3752 		ret = -ENOLINK;
3753 	}
3754 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3755 
3756 	if (ret)
3757 		lan78xx_release_rx_buf(dev, skb);
3758 
3759 	return ret;
3760 }
3761 
3762 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3763 {
3764 	struct sk_buff *rx_buf;
3765 
3766 	/* Ensure the maximum number of Rx URBs is submitted
3767 	 */
3768 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3769 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3770 			break;
3771 	}
3772 }
3773 
3774 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
3775 				    struct sk_buff *rx_buf)
3776 {
3777 	/* reset SKB data pointers */
3778 
3779 	rx_buf->data = rx_buf->head;
3780 	skb_reset_tail_pointer(rx_buf);
3781 	rx_buf->len = 0;
3782 	rx_buf->data_len = 0;
3783 
3784 	rx_submit(dev, rx_buf, GFP_ATOMIC);
3785 }
3786 
3787 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
3788 {
3789 	u32 tx_cmd_a;
3790 	u32 tx_cmd_b;
3791 
3792 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3793 
3794 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3795 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3796 
3797 	tx_cmd_b = 0;
3798 	if (skb_is_gso(skb)) {
3799 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3800 
3801 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3802 
3803 		tx_cmd_a |= TX_CMD_A_LSO_;
3804 	}
3805 
3806 	if (skb_vlan_tag_present(skb)) {
3807 		tx_cmd_a |= TX_CMD_A_IVTG_;
3808 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3809 	}
3810 
3811 	put_unaligned_le32(tx_cmd_a, buffer);
3812 	put_unaligned_le32(tx_cmd_b, buffer + 4);
3813 }
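
/* Example of the command words built above: a 1514-byte frame with
 * partial checksum offload and neither GSO nor a VLAN tag is prefixed
 * with
 *   tx_cmd_a = 1514 | TX_CMD_A_FCS_ | TX_CMD_A_IPE_ | TX_CMD_A_TPE_
 *   tx_cmd_b = 0
 * both written little-endian immediately ahead of the frame data.
 */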
3814 
3815 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
3816 					    struct sk_buff *tx_buf)
3817 {
3818 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
3819 	int remain = dev->tx_urb_size;
3820 	u8 *tx_data = tx_buf->data;
3821 	u32 urb_len = 0;
3822 
3823 	entry->num_of_packet = 0;
3824 	entry->length = 0;
3825 
3826 	/* Work through the pending SKBs and copy the data of each SKB into
3827 	 * the URB buffer if there is room for all the SKB data.
3828 	 *
3829 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled).
3830 	 */
3831 	while (remain >= TX_SKB_MIN_LEN) {
3832 		unsigned int pending_bytes;
3833 		unsigned int align_bytes;
3834 		struct sk_buff *skb;
3835 		unsigned int len;
3836 
3837 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
3838 
3839 		if (!skb)
3840 			break;
3841 
3842 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
3843 			      TX_ALIGNMENT;
3844 		len = align_bytes + TX_CMD_LEN + skb->len;
3845 		if (len > remain) {
3846 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
3847 			break;
3848 		}
3849 
3850 		tx_data += align_bytes;
3851 
3852 		lan78xx_fill_tx_cmd_words(skb, tx_data);
3853 		tx_data += TX_CMD_LEN;
3854 
3855 		len = skb->len;
3856 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
3857 			struct net_device_stats *stats = &dev->net->stats;
3858 
3859 			stats->tx_dropped++;
3860 			dev_kfree_skb_any(skb);
3861 			tx_data -= TX_CMD_LEN;
3862 			continue;
3863 		}
3864 
3865 		tx_data += len;
3866 		entry->length += len;
3867 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
3868 
3869 		dev_kfree_skb_any(skb);
3870 
3871 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
3872 
3873 		remain = dev->tx_urb_size - urb_len;
3874 	}
3875 
3876 	skb_put(tx_buf, urb_len);
3877 
3878 	return entry;
3879 }
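
/* Frames are packed back to back at TX_ALIGNMENT (4-byte) boundaries:
 * with the 8-byte command prefix written by lan78xx_fill_tx_cmd_words(),
 * a first frame of 1514 bytes leaves urb_len = 1522, so the next frame
 * starts after align_bytes = (4 - (1522 % 4)) % 4 = 2 pad bytes.
 */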
3880 
3881 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3882 {
3883 	int ret;
3884 
3885 	/* Start the stack Tx queue if it was stopped
3886 	 */
3887 	netif_tx_lock(dev->net);
3888 	if (netif_queue_stopped(dev->net)) {
3889 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
3890 			netif_wake_queue(dev->net);
3891 	}
3892 	netif_tx_unlock(dev->net);
3893 
3894 	/* Go through the Tx pending queue and set up URBs to transfer
3895 	 * the data to the device. Stop if no more pending data or URBs,
3896 	 * or if an error occurs when a URB is submitted.
3897 	 */
3898 	do {
3899 		struct skb_data *entry;
3900 		struct sk_buff *tx_buf;
3901 		unsigned long flags;
3902 
3903 		if (skb_queue_empty(&dev->txq_pend))
3904 			break;
3905 
3906 		tx_buf = lan78xx_get_tx_buf(dev);
3907 		if (!tx_buf)
3908 			break;
3909 
3910 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
3911 
3912 		spin_lock_irqsave(&dev->txq.lock, flags);
3913 		ret = usb_autopm_get_interface_async(dev->intf);
3914 		if (ret < 0) {
3915 			spin_unlock_irqrestore(&dev->txq.lock, flags);
3916 			goto out;
3917 		}
3918 
3919 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
3920 				  tx_buf->data, tx_buf->len, tx_complete,
3921 				  tx_buf);
3922 
3923 		if (tx_buf->len % dev->maxpacket == 0) {
3924 			/* send a zero-length packet (URB_ZERO_PACKET) */
3925 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
3926 		}
3927 
3928 #ifdef CONFIG_PM
3929 		/* if device is asleep stop outgoing packet processing */
3930 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3931 			usb_anchor_urb(entry->urb, &dev->deferred);
3932 			netif_stop_queue(dev->net);
3933 			spin_unlock_irqrestore(&dev->txq.lock, flags);
3934 			netdev_dbg(dev->net,
3935 				   "Delaying transmission for resumption\n");
3936 			return;
3937 		}
3938 #endif
3939 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
3940 		switch (ret) {
3941 		case 0:
3942 			netif_trans_update(dev->net);
3943 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
3944 			break;
3945 		case -EPIPE:
3946 			netif_stop_queue(dev->net);
3947 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3948 			usb_autopm_put_interface_async(dev->intf);
3949 			break;
3950 		case -ENODEV:
3951 		case -ENOENT:
3952 			netif_dbg(dev, tx_err, dev->net,
3953 				  "tx submit urb err %d (disconnected?)\n", ret);
3954 			netif_device_detach(dev->net);
3955 			break;
3956 		default:
3957 			usb_autopm_put_interface_async(dev->intf);
3958 			netif_dbg(dev, tx_err, dev->net,
3959 				  "tx submit urb err %d\n", ret);
3960 			break;
3961 		}
3962 
3963 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3964 
3965 		if (ret) {
3966 			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
3967 out:
3968 			dev->net->stats.tx_dropped += entry->num_of_packet;
3969 			lan78xx_release_tx_buf(dev, tx_buf);
3970 		}
3971 	} while (ret == 0);
3972 }
3973 
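/* lan78xx_bh - RX bottom half run from NAPI poll; returns the number of
 * frames passed to the stack, never more than @budget.
 */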
3974 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
3975 {
3976 	struct sk_buff_head done;
3977 	struct sk_buff *rx_buf;
3978 	struct skb_data *entry;
3979 	unsigned long flags;
3980 	int work_done = 0;
3981 
3982 	/* Pass frames received in the last NAPI cycle before
3983 	 * working on newly completed URBs.
3984 	 */
3985 	while (!skb_queue_empty(&dev->rxq_overflow)) {
3986 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
3987 		++work_done;
3988 	}
3989 
3990 	/* Take a snapshot of the done queue and move items to a
3991 	 * temporary queue. Rx URB completions will continue to add
3992 	 * to the done queue.
3993 	 */
3994 	__skb_queue_head_init(&done);
3995 
3996 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
3997 	skb_queue_splice_init(&dev->rxq_done, &done);
3998 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3999 
4000 	/* Extract receive frames from completed URBs and
4001 	 * pass them to the stack. Re-submit each completed URB.
4002 	 */
4003 	while ((work_done < budget) &&
4004 	       (rx_buf = __skb_dequeue(&done))) {
4005 		entry = (struct skb_data *)(rx_buf->cb);
4006 		switch (entry->state) {
4007 		case rx_done:
4008 			rx_process(dev, rx_buf, budget, &work_done);
4009 			break;
4010 		case rx_cleanup:
4011 			break;
4012 		default:
4013 			netdev_dbg(dev->net, "rx buf state %d\n",
4014 				   entry->state);
4015 			break;
4016 		}
4017 
4018 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4019 	}
4020 
4021 	/* If budget was consumed before processing all the URBs put them
4022 	 * back on the front of the done queue. They will be first to be
4023 	 * processed in the next NAPI cycle.
4024 	 */
4025 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4026 	skb_queue_splice(&done, &dev->rxq_done);
4027 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4028 
4029 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4030 		/* reset update timer delta */
4031 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4032 			dev->delta = 1;
4033 			mod_timer(&dev->stat_monitor,
4034 				  jiffies + STAT_UPDATE_TIMER);
4035 		}
4036 
4037 		/* Submit all free Rx URBs */
4038 
4039 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4040 			lan78xx_rx_urb_submit_all(dev);
4041 
4042 		/* Submit new Tx URBs */
4043 
4044 		lan78xx_tx_bh(dev);
4045 	}
4046 
4047 	return work_done;
4048 }
4049 
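/* NAPI poll callback. Per the NAPI contract, napi_complete_done() is
 * only called when less than the full budget was consumed; otherwise
 * the core polls again.
 */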
4050 static int lan78xx_poll(struct napi_struct *napi, int budget)
4051 {
4052 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4053 	int result = budget;
4054 	int work_done;
4055 
4056 	/* Don't do any work if the device is suspended */
4057 
4058 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4059 		napi_complete_done(napi, 0);
4060 		return 0;
4061 	}
4062 
4063 	/* Process completed URBs and submit new URBs */
4064 
4065 	work_done = lan78xx_bh(dev, budget);
4066 
4067 	if (work_done < budget) {
4068 		napi_complete_done(napi, work_done);
4069 
4070 		/* Start a new polling cycle if data was received or
4071 		 * data is waiting to be transmitted.
4072 		 */
4073 		if (!skb_queue_empty(&dev->rxq_done)) {
4074 			napi_schedule(napi);
4075 		} else if (netif_carrier_ok(dev->net)) {
4076 			if (skb_queue_empty(&dev->txq) &&
4077 			    !skb_queue_empty(&dev->txq_pend)) {
4078 				napi_schedule(napi);
4079 			} else {
4080 				netif_tx_lock(dev->net);
4081 				if (netif_queue_stopped(dev->net)) {
4082 					netif_wake_queue(dev->net);
4083 					napi_schedule(napi);
4084 				}
4085 				netif_tx_unlock(dev->net);
4086 			}
4087 		}
4088 		result = work_done;
4089 	}
4090 
4091 	return result;
4092 }
4093 
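/* Deferred event worker scheduled via lan78xx_defer_kevent(): recovers
 * stalled bulk endpoints (EVENT_TX_HALT/EVENT_RX_HALT), handles link
 * resets and periodic statistics updates. Runs with an autopm
 * reference held.
 */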
4094 static void lan78xx_delayedwork(struct work_struct *work)
4095 {
4096 	int status;
4097 	struct lan78xx_net *dev;
4098 
4099 	dev = container_of(work, struct lan78xx_net, wq.work);
4100 
4101 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4102 		return;
4103 
4104 	if (usb_autopm_get_interface(dev->intf) < 0)
4105 		return;
4106 
4107 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4108 		unlink_urbs(dev, &dev->txq);
4109 
4110 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4111 		if (status < 0 &&
4112 		    status != -EPIPE &&
4113 		    status != -ESHUTDOWN) {
4114 			if (netif_msg_tx_err(dev))
4115 				netdev_err(dev->net,
4116 					   "can't clear tx halt, status %d\n",
4117 					   status);
4118 		} else {
4119 			clear_bit(EVENT_TX_HALT, &dev->flags);
4120 			if (status != -ESHUTDOWN)
4121 				netif_wake_queue(dev->net);
4122 		}
4123 	}
4124 
4125 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4126 		unlink_urbs(dev, &dev->rxq);
4127 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4128 		if (status < 0 &&
4129 		    status != -EPIPE &&
4130 		    status != -ESHUTDOWN) {
4131 			if (netif_msg_rx_err(dev))
4132 				netdev_err(dev->net,
4133 					   "can't clear rx halt, status %d\n",
4134 					   status);
4135 		} else {
4136 			clear_bit(EVENT_RX_HALT, &dev->flags);
4137 			napi_schedule(&dev->napi);
4138 		}
4139 	}
4140 
4141 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4142 		int ret;
4143 
4144 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4145 		ret = lan78xx_link_reset(dev);
4146 		if (ret < 0)
4147 			netdev_info(dev->net, "link reset failed (%d)\n",
4148 				    ret);
4149 	}
4150 
4151 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4152 		lan78xx_update_stats(dev);
4153 
4154 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4155 
4156 		mod_timer(&dev->stat_monitor,
4157 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4158 
4159 		dev->delta = min((dev->delta * 2), 50);
4160 	}
4161 
4162 	usb_autopm_put_interface(dev->intf);
4163 }
4164 
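/* Completion handler for the interrupt-in (status) URB; resubmits it
 * unless the device has gone away or the interface is down.
 */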
4165 static void intr_complete(struct urb *urb)
4166 {
4167 	struct lan78xx_net *dev = urb->context;
4168 	int status = urb->status;
4169 
4170 	switch (status) {
4171 	/* success */
4172 	case 0:
4173 		lan78xx_status(dev, urb);
4174 		break;
4175 
4176 	/* software-driven interface shutdown */
4177 	case -ENOENT:			/* urb killed */
4178 	case -ENODEV:			/* hardware gone */
4179 	case -ESHUTDOWN:		/* hardware gone */
4180 		netif_dbg(dev, ifdown, dev->net,
4181 			  "intr shutdown, code %d\n", status);
4182 		return;
4183 
4184 	/* NOTE:  not throttling like RX/TX, since this endpoint
4185 	 * already polls infrequently
4186 	 */
4187 	default:
4188 		netdev_dbg(dev->net, "intr status %d\n", status);
4189 		break;
4190 	}
4191 
4192 	if (!netif_device_present(dev->net) ||
4193 	    !netif_running(dev->net)) {
4194 		netdev_warn(dev->net, "not submitting new status URB\n");
4195 		return;
4196 	}
4197 
4198 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4199 	status = usb_submit_urb(urb, GFP_ATOMIC);
4200 
4201 	switch (status) {
4202 	case  0:
4203 		break;
4204 	case -ENODEV:
4205 	case -ENOENT:
4206 		netif_dbg(dev, timer, dev->net,
4207 			  "intr resubmit %d (disconnect?)\n", status);
4208 		netif_device_detach(dev->net);
4209 		break;
4210 	default:
4211 		netif_err(dev, timer, dev->net,
4212 			  "intr resubmit --> %d\n", status);
4213 		break;
4214 	}
4215 }
4216 
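/* Tear down in roughly the reverse order of probe: NAPI, netdev,
 * timers and workers, PHY, deferred URBs, URB pools and the
 * interrupt URB.
 */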
4217 static void lan78xx_disconnect(struct usb_interface *intf)
4218 {
4219 	struct lan78xx_net *dev;
4220 	struct usb_device *udev;
4221 	struct net_device *net;
4222 	struct phy_device *phydev;
4223 
4224 	dev = usb_get_intfdata(intf);
4225 	usb_set_intfdata(intf, NULL);
4226 	if (!dev)
4227 		return;
4228 
4229 	netif_napi_del(&dev->napi);
4230 
4231 	udev = interface_to_usbdev(intf);
4232 	net = dev->net;
4233 
4234 	unregister_netdev(net);
4235 
4236 	timer_shutdown_sync(&dev->stat_monitor);
4237 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4238 	cancel_delayed_work_sync(&dev->wq);
4239 
4240 	phydev = net->phydev;
4241 
4242 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
4243 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
4244 
4245 	phy_disconnect(net->phydev);
4246 
4247 	if (phy_is_pseudo_fixed_link(phydev))
4248 		fixed_phy_unregister(phydev);
4249 
4250 	usb_scuttle_anchored_urbs(&dev->deferred);
4251 
4252 	lan78xx_unbind(dev, intf);
4253 
4254 	lan78xx_free_tx_resources(dev);
4255 	lan78xx_free_rx_resources(dev);
4256 
4257 	usb_kill_urb(dev->urb_intr);
4258 	usb_free_urb(dev->urb_intr);
4259 
4260 	free_netdev(net);
4261 	usb_put_dev(udev);
4262 }
4263 
4264 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4265 {
4266 	struct lan78xx_net *dev = netdev_priv(net);
4267 
4268 	unlink_urbs(dev, &dev->txq);
4269 	napi_schedule(&dev->napi);
4270 }
4271 
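/* Drop GSO features for frames that exceed the device's TSO limit and
 * apply the generic VLAN/VXLAN offload restrictions.
 */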
4272 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4273 						struct net_device *netdev,
4274 						netdev_features_t features)
4275 {
4276 	struct lan78xx_net *dev = netdev_priv(netdev);
4277 
4278 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4279 		features &= ~NETIF_F_GSO_MASK;
4280 
4281 	features = vlan_features_check(skb, features);
4282 	features = vxlan_features_check(skb, features);
4283 
4284 	return features;
4285 }
4286 
4287 static const struct net_device_ops lan78xx_netdev_ops = {
4288 	.ndo_open		= lan78xx_open,
4289 	.ndo_stop		= lan78xx_stop,
4290 	.ndo_start_xmit		= lan78xx_start_xmit,
4291 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4292 	.ndo_change_mtu		= lan78xx_change_mtu,
4293 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4294 	.ndo_validate_addr	= eth_validate_addr,
4295 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4296 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4297 	.ndo_set_features	= lan78xx_set_features,
4298 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4299 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4300 	.ndo_features_check	= lan78xx_features_check,
4301 };
4302 
4303 static void lan78xx_stat_monitor(struct timer_list *t)
4304 {
4305 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4306 
4307 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4308 }
4309 
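/* Probe: allocate the netdev and the TX/RX URB pools, validate the
 * expected bulk-in, bulk-out and interrupt-in endpoints, bind and
 * initialise the device, set up the status URB and the PHY, then
 * register the network interface.
 */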
4310 static int lan78xx_probe(struct usb_interface *intf,
4311 			 const struct usb_device_id *id)
4312 {
4313 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4314 	struct lan78xx_net *dev;
4315 	struct net_device *netdev;
4316 	struct usb_device *udev;
4317 	int ret;
4318 	unsigned int maxp;
4319 	unsigned int period;
4320 	u8 *buf = NULL;
4321 
4322 	udev = interface_to_usbdev(intf);
4323 	udev = usb_get_dev(udev);
4324 
4325 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4326 	if (!netdev) {
4327 		dev_err(&intf->dev, "Error: OOM\n");
4328 		ret = -ENOMEM;
4329 		goto out1;
4330 	}
4331 
4332 	/* netdev_printk() needs this */
4333 	SET_NETDEV_DEV(netdev, &intf->dev);
4334 
4335 	dev = netdev_priv(netdev);
4336 	dev->udev = udev;
4337 	dev->intf = intf;
4338 	dev->net = netdev;
4339 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4340 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4341 
4342 	skb_queue_head_init(&dev->rxq);
4343 	skb_queue_head_init(&dev->txq);
4344 	skb_queue_head_init(&dev->rxq_done);
4345 	skb_queue_head_init(&dev->txq_pend);
4346 	skb_queue_head_init(&dev->rxq_overflow);
4347 	mutex_init(&dev->phy_mutex);
4348 	mutex_init(&dev->dev_mutex);
4349 
4350 	ret = lan78xx_urb_config_init(dev);
4351 	if (ret < 0)
4352 		goto out2;
4353 
4354 	ret = lan78xx_alloc_tx_resources(dev);
4355 	if (ret < 0)
4356 		goto out2;
4357 
4358 	ret = lan78xx_alloc_rx_resources(dev);
4359 	if (ret < 0)
4360 		goto out3;
4361 
4362 	/* MTU range: 68 - 9000 */
4363 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4364 
4365 	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4366 
4367 	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4368 
4369 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4370 	init_usb_anchor(&dev->deferred);
4371 
4372 	netdev->netdev_ops = &lan78xx_netdev_ops;
4373 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4374 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4375 
4376 	dev->delta = 1;
4377 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4378 
4379 	mutex_init(&dev->stats.access_lock);
4380 
4381 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4382 		ret = -ENODEV;
4383 		goto out4;
4384 	}
4385 
4386 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4387 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4388 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4389 		ret = -ENODEV;
4390 		goto out4;
4391 	}
4392 
4393 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4394 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4395 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4396 		ret = -ENODEV;
4397 		goto out4;
4398 	}
4399 
4400 	ep_intr = &intf->cur_altsetting->endpoint[2];
4401 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4402 		ret = -ENODEV;
4403 		goto out4;
4404 	}
4405 
4406 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4407 					usb_endpoint_num(&ep_intr->desc));
4408 
4409 	ret = lan78xx_bind(dev, intf);
4410 	if (ret < 0)
4411 		goto out4;
4412 
4413 	period = ep_intr->desc.bInterval;
4414 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4415 	buf = kmalloc(maxp, GFP_KERNEL);
4416 	if (!buf) {
4417 		ret = -ENOMEM;
4418 		goto out5;
4419 	}
4420 
4421 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4422 	if (!dev->urb_intr) {
4423 		ret = -ENOMEM;
4424 		goto out6;
4425 	}
4426 
4427 	usb_fill_int_urb(dev->urb_intr, dev->udev,
4428 			 dev->pipe_intr, buf, maxp,
4429 			 intr_complete, dev, period);
4430 	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4431 	/* buf now belongs to the URB; usb_free_urb() frees it */
4432 	buf = NULL;
4433 
4434 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4435 
4436 	/* Reject broken descriptors. */
4437 	if (dev->maxpacket == 0) {
4438 		ret = -ENODEV;
4439 		goto out7;
4440 	}
4439 
4440 	/* driver requires remote-wakeup capability during autosuspend. */
4441 	intf->needs_remote_wakeup = 1;
4442 
4443 	ret = lan78xx_phy_init(dev);
4444 	if (ret < 0)
4445 		goto out7;
4446 
4447 	ret = register_netdev(netdev);
4448 	if (ret != 0) {
4449 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4450 		goto out8;
4451 	}
4452 
4453 	usb_set_intfdata(intf, dev);
4454 
4455 	ret = device_set_wakeup_enable(&udev->dev, true);
4456 
4457 	/* The USB core's default autosuspend delay of 2 s adds more
4458 	 * overhead than it saves; use 10 s instead.
4459 	 */
4460 	pm_runtime_set_autosuspend_delay(&udev->dev,
4461 					 DEFAULT_AUTOSUSPEND_DELAY);
4462 
4463 	return 0;
4464 
4465 out8:
4466 	phy_disconnect(netdev->phydev);
4467 out7:
4468 	usb_free_urb(dev->urb_intr);
4469 out6:
4470 	kfree(buf);
4471 out5:
4472 	lan78xx_unbind(dev, intf);
4473 out4:
4474 	netif_napi_del(&dev->napi);
4475 	lan78xx_free_rx_resources(dev);
4476 out3:
4477 	lan78xx_free_tx_resources(dev);
4478 out2:
4479 	free_netdev(netdev);
4480 out1:
4481 	usb_put_dev(udev);
4482 
4483 	return ret;
4484 }
4485 
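/* CRC-16 as expected by the wakeup-frame detector: polynomial 0x8005,
 * initial value 0xFFFF, data bits fed LSB first. The result is
 * programmed into WUF_CFGx together with the byte mask the hardware
 * applies when checking incoming frames.
 */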
4486 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4487 {
4488 	const u16 crc16poly = 0x8005;
4489 	int i;
4490 	u16 bit, crc, msb;
4491 	u8 data;
4492 
4493 	crc = 0xFFFF;
4494 	for (i = 0; i < len; i++) {
4495 		data = *buf++;
4496 		for (bit = 0; bit < 8; bit++) {
4497 			msb = crc >> 15;
4498 			crc <<= 1;
4499 
4500 			if (msb ^ (u16)(data & 1)) {
4501 				crc ^= crc16poly;
4502 				crc |= (u16)0x0001U;
4503 			}
4504 			data >>= 1;
4505 		}
4506 	}
4507 
4508 	return crc;
4509 }
4510 
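/* Arm the device for USB autosuspend (selective suspend): stop both
 * data paths, enable wake on good frames with frame storage, PHY wake
 * and suspend mode 3, clear the wake-up status bits, then restart the
 * RX path so wake events can be detected.
 */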
4511 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4512 {
4513 	u32 buf;
4514 	int ret;
4515 
4516 	ret = lan78xx_stop_tx_path(dev);
4517 	if (ret < 0)
4518 		return ret;
4519 
4520 	ret = lan78xx_stop_rx_path(dev);
4521 	if (ret < 0)
4522 		return ret;
4523 
4524 	/* auto suspend (selective suspend) */
4525 
4526 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4527 	if (ret < 0)
4528 		return ret;
4529 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4530 	if (ret < 0)
4531 		return ret;
4532 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4533 	if (ret < 0)
4534 		return ret;
4535 
4536 	/* set goodframe wakeup */
4537 
4538 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4539 	if (ret < 0)
4540 		return ret;
4541 
4542 	buf |= WUCSR_RFE_WAKE_EN_;
4543 	buf |= WUCSR_STORE_WAKE_;
4544 
4545 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4546 	if (ret < 0)
4547 		return ret;
4548 
4549 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4550 	if (ret < 0)
4551 		return ret;
4552 
4553 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4554 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4555 	buf |= PMT_CTL_PHY_WAKE_EN_;
4556 	buf |= PMT_CTL_WOL_EN_;
4557 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4558 	buf |= PMT_CTL_SUS_MODE_3_;
4559 
4560 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4561 	if (ret < 0)
4562 		return ret;
4563 
4564 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4565 	if (ret < 0)
4566 		return ret;
4567 
4568 	buf |= PMT_CTL_WUPS_MASK_;
4569 
4570 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4571 	if (ret < 0)
4572 		return ret;
4573 
4574 	ret = lan78xx_start_rx_path(dev);
4575 
4576 	return ret;
4577 }
4578 
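/* Program the wakeup logic for system suspend according to the WoL
 * flags: PHY events, magic packets, broadcast, IPv4/IPv6 multicast,
 * perfect-DA unicast and ARP frames, selecting the matching suspend
 * mode in PMT_CTL.
 */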
4579 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4580 {
4581 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4582 	const u8 ipv6_multicast[2] = { 0x33, 0x33 };
4583 	const u8 arp_type[2] = { 0x08, 0x06 };
4584 	u32 temp_pmt_ctl;
4585 	int mask_index;
4586 	u32 temp_wucsr;
4587 	u32 buf;
4588 	u16 crc;
4589 	int ret;
4590 
4591 	ret = lan78xx_stop_tx_path(dev);
4592 	if (ret < 0)
4593 		return ret;
4594 	ret = lan78xx_stop_rx_path(dev);
4595 	if (ret < 0)
4596 		return ret;
4597 
4598 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4599 	if (ret < 0)
4600 		return ret;
4601 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4602 	if (ret < 0)
4603 		return ret;
4604 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4605 	if (ret < 0)
4606 		return ret;
4607 
4608 	temp_wucsr = 0;
4609 
4610 	temp_pmt_ctl = 0;
4611 
4612 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4613 	if (ret < 0)
4614 		return ret;
4615 
4616 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4617 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4618 
4619 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4620 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4621 		if (ret < 0)
4622 			return ret;
4623 	}
4624 
4625 	mask_index = 0;
4626 	if (wol & WAKE_PHY) {
4627 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4628 
4629 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4630 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4631 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4632 	}
4633 	if (wol & WAKE_MAGIC) {
4634 		temp_wucsr |= WUCSR_MPEN_;
4635 
4636 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4637 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4638 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4639 	}
4640 	if (wol & WAKE_BCAST) {
4641 		temp_wucsr |= WUCSR_BCST_EN_;
4642 
4643 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4644 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4645 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4646 	}
4647 	if (wol & WAKE_MCAST) {
4648 		temp_wucsr |= WUCSR_WAKE_EN_;
4649 
4650 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4651 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4652 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4653 					WUF_CFGX_EN_ |
4654 					WUF_CFGX_TYPE_MCAST_ |
4655 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4656 					(crc & WUF_CFGX_CRC16_MASK_));
4657 		if (ret < 0)
4658 			return ret;
4659 
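		/* WUF_MASKx is a byte-enable bitmap for the CRC window;
		 * 0x7 selects the first three bytes (01:00:5E).
		 */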
4660 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4661 		if (ret < 0)
4662 			return ret;
4663 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4664 		if (ret < 0)
4665 			return ret;
4666 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4667 		if (ret < 0)
4668 			return ret;
4669 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4670 		if (ret < 0)
4671 			return ret;
4672 
4673 		mask_index++;
4674 
4675 		/* for IPv6 Multicast */
4676 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4677 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4678 					WUF_CFGX_EN_ |
4679 					WUF_CFGX_TYPE_MCAST_ |
4680 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4681 					(crc & WUF_CFGX_CRC16_MASK_));
4682 		if (ret < 0)
4683 			return ret;
4684 
4685 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4686 		if (ret < 0)
4687 			return ret;
4688 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4689 		if (ret < 0)
4690 			return ret;
4691 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4692 		if (ret < 0)
4693 			return ret;
4694 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4695 		if (ret < 0)
4696 			return ret;
4697 
4698 		mask_index++;
4699 
4700 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4701 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4702 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4703 	}
4704 	if (wol & WAKE_UCAST) {
4705 		temp_wucsr |= WUCSR_PFDA_EN_;
4706 
4707 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4708 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4709 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4710 	}
4711 	if (wol & WAKE_ARP) {
4712 		temp_wucsr |= WUCSR_WAKE_EN_;
4713 
4714 		/* set WUF_CFG & WUF_MASK
4715 		 * for packettype (offset 12,13) = ARP (0x0806)
4716 		 */
4717 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
4718 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4719 					WUF_CFGX_EN_ |
4720 					WUF_CFGX_TYPE_ALL_ |
4721 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4722 					(crc & WUF_CFGX_CRC16_MASK_));
4723 		if (ret < 0)
4724 			return ret;
4725 
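		/* 0x3000 enables bytes 12-13 of the frame, i.e. the
		 * EtherType field matched against the ARP CRC above.
		 */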
4726 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4727 		if (ret < 0)
4728 			return ret;
4729 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4730 		if (ret < 0)
4731 			return ret;
4732 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4733 		if (ret < 0)
4734 			return ret;
4735 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4736 		if (ret < 0)
4737 			return ret;
4738 
4739 		mask_index++;
4740 
4741 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4742 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4743 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4744 	}
4745 
4746 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4747 	if (ret < 0)
4748 		return ret;
4749 
4750 	/* when multiple WOL bits are set */
4751 	if (hweight_long((unsigned long)wol) > 1) {
4752 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4753 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4754 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4755 	}
4756 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4757 	if (ret < 0)
4758 		return ret;
4759 
4760 	/* clear WUPS */
4761 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4762 	if (ret < 0)
4763 		return ret;
4764 
4765 	buf |= PMT_CTL_WUPS_MASK_;
4766 
4767 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4768 	if (ret < 0)
4769 		return ret;
4770 
4771 	ret = lan78xx_start_rx_path(dev);
4772 
4773 	return ret;
4774 }
4775 
4776 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4777 {
4778 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4779 	bool dev_open;
4780 	int ret;
4781 
4782 	mutex_lock(&dev->dev_mutex);
4783 
4784 	netif_dbg(dev, ifdown, dev->net,
4785 		  "suspending: pm event %#x", message.event);
4786 
4787 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4788 
4789 	if (dev_open) {
4790 		spin_lock_irq(&dev->txq.lock);
4791 		/* don't autosuspend while transmitting */
4792 		if ((skb_queue_len(&dev->txq) ||
4793 		     skb_queue_len(&dev->txq_pend)) &&
4794 		    PMSG_IS_AUTO(message)) {
4795 			spin_unlock_irq(&dev->txq.lock);
4796 			ret = -EBUSY;
4797 			goto out;
4798 		} else {
4799 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4800 			spin_unlock_irq(&dev->txq.lock);
4801 		}
4802 
4803 		/* stop RX */
4804 		ret = lan78xx_stop_rx_path(dev);
4805 		if (ret < 0)
4806 			goto out;
4807 
4808 		ret = lan78xx_flush_rx_fifo(dev);
4809 		if (ret < 0)
4810 			goto out;
4811 
4812 		/* stop Tx */
4813 		ret = lan78xx_stop_tx_path(dev);
4814 		if (ret < 0)
4815 			goto out;
4816 
4817 		/* empty out the Rx and Tx queues */
4818 		netif_device_detach(dev->net);
4819 		lan78xx_terminate_urbs(dev);
4820 		usb_kill_urb(dev->urb_intr);
4821 
4822 		/* reattach */
4823 		netif_device_attach(dev->net);
4824 
4825 		del_timer(&dev->stat_monitor);
4826 
4827 		if (PMSG_IS_AUTO(message)) {
4828 			ret = lan78xx_set_auto_suspend(dev);
4829 			if (ret < 0)
4830 				goto out;
4831 		} else {
4832 			struct lan78xx_priv *pdata;
4833 
4834 			pdata = (struct lan78xx_priv *)(dev->data[0]);
4835 			netif_carrier_off(dev->net);
4836 			ret = lan78xx_set_suspend(dev, pdata->wol);
4837 			if (ret < 0)
4838 				goto out;
4839 		}
4840 	} else {
4841 		/* Interface is down; don't allow WOL and PHY
4842 		 * events to wake up the host
4843 		 */
4844 		u32 buf;
4845 
4846 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4847 
4848 		ret = lan78xx_write_reg(dev, WUCSR, 0);
4849 		if (ret < 0)
4850 			goto out;
4851 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
4852 		if (ret < 0)
4853 			goto out;
4854 
4855 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4856 		if (ret < 0)
4857 			goto out;
4858 
4859 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4860 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
4861 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
4862 		buf |= PMT_CTL_SUS_MODE_3_;
4863 
4864 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4865 		if (ret < 0)
4866 			goto out;
4867 
4868 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4869 		if (ret < 0)
4870 			goto out;
4871 
4872 		buf |= PMT_CTL_WUPS_MASK_;
4873 
4874 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4875 		if (ret < 0)
4876 			goto out;
4877 	}
4878 
4879 	ret = 0;
4880 out:
4881 	mutex_unlock(&dev->dev_mutex);
4882 
4883 	return ret;
4884 }
4885 
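/* Resubmit TX URBs that were anchored on dev->deferred while the
 * device was autosuspended. Called with txq.lock held; returns true
 * if the bulk-out pipe reported a stall (-EPIPE).
 */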
4886 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4887 {
4888 	bool pipe_halted = false;
4889 	struct urb *urb;
4890 
4891 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4892 		struct sk_buff *skb = urb->context;
4893 		int ret;
4894 
4895 		if (!netif_device_present(dev->net) ||
4896 		    !netif_carrier_ok(dev->net) ||
4897 		    pipe_halted) {
4898 			lan78xx_release_tx_buf(dev, skb);
4899 			continue;
4900 		}
4901 
4902 		ret = usb_submit_urb(urb, GFP_ATOMIC);
4903 
4904 		if (ret == 0) {
4905 			netif_trans_update(dev->net);
4906 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4907 		} else {
4908 			if (ret == -EPIPE) {
4909 				netif_stop_queue(dev->net);
4910 				pipe_halted = true;
4911 			} else if (ret == -ENODEV) {
4912 				netif_device_detach(dev->net);
4913 			}
4914 
4915 			lan78xx_release_tx_buf(dev, skb);
4916 		}
4917 	}
4918 
4919 	return pipe_halted;
4920 }
4921 
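/* Resume handler: restart the interrupt URB, resubmit deferred TX
 * URBs, restart the TX path, then clear any recorded wake-up event
 * status in WUCSR/WUCSR2/WK_SRC.
 */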
4922 static int lan78xx_resume(struct usb_interface *intf)
4923 {
4924 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4925 	bool dev_open;
4926 	int ret;
4927 
4928 	mutex_lock(&dev->dev_mutex);
4929 
4930 	netif_dbg(dev, ifup, dev->net, "resuming device");
4931 
4932 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4933 
4934 	if (dev_open) {
4935 		bool pipe_halted = false;
4936 
4937 		ret = lan78xx_flush_tx_fifo(dev);
4938 		if (ret < 0)
4939 			goto out;
4940 
4941 		if (dev->urb_intr) {
4942 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4943 
4944 			if (ret < 0) {
4945 				if (ret == -ENODEV)
4946 					netif_device_detach(dev->net);
4947 				netdev_warn(dev->net, "Failed to submit intr URB\n");
4948 			}
4949 		}
4950 
4951 		spin_lock_irq(&dev->txq.lock);
4952 
4953 		if (netif_device_present(dev->net)) {
4954 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
4955 
4956 			if (pipe_halted)
4957 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4958 		}
4959 
4960 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4961 
4962 		spin_unlock_irq(&dev->txq.lock);
4963 
4964 		if (!pipe_halted &&
4965 		    netif_device_present(dev->net) &&
4966 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
4967 			netif_start_queue(dev->net);
4968 
4969 		ret = lan78xx_start_tx_path(dev);
4970 		if (ret < 0)
4971 			goto out;
4972 
4973 		napi_schedule(&dev->napi);
4974 
4975 		if (!timer_pending(&dev->stat_monitor)) {
4976 			dev->delta = 1;
4977 			mod_timer(&dev->stat_monitor,
4978 				  jiffies + STAT_UPDATE_TIMER);
4979 		}
4980 
4981 	} else {
4982 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4983 	}
4984 
4985 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4986 	if (ret < 0)
4987 		goto out;
4988 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4989 	if (ret < 0)
4990 		goto out;
4991 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4992 	if (ret < 0)
4993 		goto out;
4994 
4995 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4996 					     WUCSR2_ARP_RCD_ |
4997 					     WUCSR2_IPV6_TCPSYN_RCD_ |
4998 					     WUCSR2_IPV4_TCPSYN_RCD_);
4999 	if (ret < 0)
5000 		goto out;
5001 
5002 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5003 					    WUCSR_EEE_RX_WAKE_ |
5004 					    WUCSR_PFDA_FR_ |
5005 					    WUCSR_RFE_WAKE_FR_ |
5006 					    WUCSR_WUFR_ |
5007 					    WUCSR_MPR_ |
5008 					    WUCSR_BCST_FR_);
5009 	if (ret < 0)
5010 		goto out;
5011 
5012 	ret = 0;
5013 out:
5014 	mutex_unlock(&dev->dev_mutex);
5015 
5016 	return ret;
5017 }
5018 
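/* The device may have lost its register state across the reset, so run
 * a full chip reset and restart the PHY before the normal resume path.
 */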
5019 static int lan78xx_reset_resume(struct usb_interface *intf)
5020 {
5021 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5022 	int ret;
5023 
5024 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5025 
5026 	ret = lan78xx_reset(dev);
5027 	if (ret < 0)
5028 		return ret;
5029 
5030 	phy_start(dev->net->phydev);
5031 
5032 	ret = lan78xx_resume(intf);
5033 
5034 	return ret;
5035 }
5036 
5037 static const struct usb_device_id products[] = {
5038 	{
5039 	/* LAN7800 USB Gigabit Ethernet Device */
5040 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5041 	},
5042 	{
5043 	/* LAN7850 USB Gigabit Ethernet Device */
5044 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5045 	},
5046 	{
5047 	/* LAN7801 USB Gigabit Ethernet Device */
5048 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5049 	},
5050 	{
5051 	/* AT29M2-AF USB Gigabit Ethernet Device */
5052 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5053 	},
5054 	{},
5055 };
5056 MODULE_DEVICE_TABLE(usb, products);
5057 
5058 static struct usb_driver lan78xx_driver = {
5059 	.name			= DRIVER_NAME,
5060 	.id_table		= products,
5061 	.probe			= lan78xx_probe,
5062 	.disconnect		= lan78xx_disconnect,
5063 	.suspend		= lan78xx_suspend,
5064 	.resume			= lan78xx_resume,
5065 	.reset_resume		= lan78xx_reset_resume,
5066 	.supports_autosuspend	= 1,
5067 	.disable_hub_initiated_lpm = 1,
5068 };
5069 
5070 module_usb_driver(lan78xx_driver);
5071 
5072 MODULE_AUTHOR(DRIVER_AUTHOR);
5073 MODULE_DESCRIPTION(DRIVER_DESC);
5074 MODULE_LICENSE("GPL");
5075