xref: /linux/drivers/net/usb/lan78xx.c (revision a35d00d5512accd337510fa4de756b743d331a87)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 
50 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52 					 (FLOW_THRESHOLD(off) << 8))
53 
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS			9216
56 #define FLOW_ON_HS			8704
57 
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS			4096
60 #define FLOW_OFF_HS			1024
61 
62 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY		(0x0800)
64 #define MAX_SINGLE_PACKET_SIZE		(9000)
65 #define DEFAULT_TX_CSUM_ENABLE		(true)
66 #define DEFAULT_RX_CSUM_ENABLE		(true)
67 #define DEFAULT_TSO_CSUM_ENABLE		(true)
68 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
69 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
70 #define TX_ALIGNMENT			(4)
71 #define RXW_PADDING			2
72 
73 #define LAN78XX_USB_VENDOR_ID		(0x0424)
74 #define LAN7800_USB_PRODUCT_ID		(0x7800)
75 #define LAN7850_USB_PRODUCT_ID		(0x7850)
76 #define LAN7801_USB_PRODUCT_ID		(0x7801)
77 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
78 #define LAN78XX_OTP_MAGIC		(0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
81 
82 #define	MII_READ			1
83 #define	MII_WRITE			0
84 
85 #define EEPROM_INDICATOR		(0xA5)
86 #define EEPROM_MAC_OFFSET		(0x01)
87 #define MAX_EEPROM_SIZE			512
88 #define OTP_INDICATOR_1			(0xF3)
89 #define OTP_INDICATOR_2			(0xF7)
90 
91 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
92 					 WAKE_MCAST | WAKE_BCAST | \
93 					 WAKE_ARP | WAKE_MAGIC)
94 
95 #define TX_URB_NUM			10
96 #define TX_SS_URB_NUM			TX_URB_NUM
97 #define TX_HS_URB_NUM			TX_URB_NUM
98 #define TX_FS_URB_NUM			TX_URB_NUM
99 
100 /* A single URB buffer must be large enough to hold a complete jumbo packet
101  */
102 #define TX_SS_URB_SIZE			(32 * 1024)
103 #define TX_HS_URB_SIZE			(16 * 1024)
104 #define TX_FS_URB_SIZE			(10 * 1024)
105 
106 #define RX_SS_URB_NUM			30
107 #define RX_HS_URB_NUM			10
108 #define RX_FS_URB_NUM			10
109 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
110 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
111 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
112 
113 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
114 #define SS_BULK_IN_DELAY		0x2000
115 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
116 #define HS_BULK_IN_DELAY		0x2000
117 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
118 #define FS_BULK_IN_DELAY		0x2000
119 
120 #define TX_CMD_LEN			8
121 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
122 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123 
124 #define RX_CMD_LEN			10
125 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
126 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
127 
128 /* USB related defines */
129 #define BULK_IN_PIPE			1
130 #define BULK_OUT_PIPE			2
131 
132 /* default autosuspend delay (mSec)*/
133 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
134 
135 /* statistic update interval (mSec) */
136 #define STAT_UPDATE_TIMER		(1 * 1000)
137 
138 /* time to wait for MAC or FCT to stop (jiffies) */
139 #define HW_DISABLE_TIMEOUT		(HZ / 10)
140 
141 /* time to wait between polling MAC or FCT state (ms) */
142 #define HW_DISABLE_DELAY_MS		1
143 
144 /* defines interrupts from interrupt EP */
145 #define MAX_INT_EP			(32)
146 #define INT_EP_INTEP			(31)
147 #define INT_EP_OTP_WR_DONE		(28)
148 #define INT_EP_EEE_TX_LPI_START		(26)
149 #define INT_EP_EEE_TX_LPI_STOP		(25)
150 #define INT_EP_EEE_RX_LPI		(24)
151 #define INT_EP_MAC_RESET_TIMEOUT	(23)
152 #define INT_EP_RDFO			(22)
153 #define INT_EP_TXE			(21)
154 #define INT_EP_USB_STATUS		(20)
155 #define INT_EP_TX_DIS			(19)
156 #define INT_EP_RX_DIS			(18)
157 #define INT_EP_PHY			(17)
158 #define INT_EP_DP			(16)
159 #define INT_EP_MAC_ERR			(15)
160 #define INT_EP_TDFU			(14)
161 #define INT_EP_TDFO			(13)
162 #define INT_EP_UTX			(12)
163 #define INT_EP_GPIO_11			(11)
164 #define INT_EP_GPIO_10			(10)
165 #define INT_EP_GPIO_9			(9)
166 #define INT_EP_GPIO_8			(8)
167 #define INT_EP_GPIO_7			(7)
168 #define INT_EP_GPIO_6			(6)
169 #define INT_EP_GPIO_5			(5)
170 #define INT_EP_GPIO_4			(4)
171 #define INT_EP_GPIO_3			(3)
172 #define INT_EP_GPIO_2			(2)
173 #define INT_EP_GPIO_1			(1)
174 #define INT_EP_GPIO_0			(0)
175 
/* ethtool statistics names.
 * NOTE(review): the entry order appears to mirror the field order of
 * struct lan78xx_statstage / lan78xx_statstage64 one-for-one (counters
 * are presumably copied positionally by the ethtool handlers) — keep the
 * three in sync when adding counters; verify against the get_strings /
 * get_ethtool_stats callbacks.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
225 
/* Raw hardware statistics block as returned by the device through
 * USB_VENDOR_REQUEST_GET_STATS: an array of 32-bit counters, converted
 * from little-endian wire order to host order by lan78xx_read_stats().
 * Field order must match lan78xx_statstage64 and lan78xx_gstrings,
 * since the counters are processed positionally as a flat u32 array.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
275 
/* 64-bit accumulated view of the hardware counters: lan78xx_update_stats()
 * folds 32-bit hardware readings plus their recorded rollovers into these
 * fields.  Must stay field-for-field parallel with lan78xx_statstage.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
325 
/* Registers exposed by the ethtool register-dump interface.
 * NOTE(review): table looks read-only from here; if no writer exists
 * elsewhere in the file it could be made const — verify before changing.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
347 
348 #define PHY_REG_SIZE (32 * sizeof(u32))
349 
350 struct lan78xx_net;
351 
/* Driver-private state reached through lan78xx_net::driver_priv.
 * Holds shadow copies of receive-filter tables plus the work items that
 * push filter/VLAN updates to the hardware.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;	/* cached receive-filtering-engine control value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN membership bitmap */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast-filter update */
	struct work_struct set_vlan;	  /* deferred VLAN-table update */
	u32 wol;	/* configured WAKE_* wake-on-LAN flags */
};
364 
/* Lifecycle state of a URB-backed skb, kept in skb_data::state.
 * Drives how completion and unlink handling treat the buffer.
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
374 
/* Per-buffer bookkeeping stored in skb->cb; ties each skb to its
 * preallocated URB and owning device (see lan78xx_alloc_buf_pool()).
 * Must fit within sizeof(((struct sk_buff *)0)->cb).
 */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB paired with this buffer for its lifetime */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;		/* payload length for this transfer */
	int num_of_packet;	/* frames aggregated in this URB buffer */
};
382 
383 #define EVENT_TX_HALT			0
384 #define EVENT_RX_HALT			1
385 #define EVENT_RX_MEMORY			2
386 #define EVENT_STS_SPLIT			3
387 #define EVENT_LINK_RESET		4
388 #define EVENT_RX_PAUSED			5
389 #define EVENT_DEV_WAKING		6
390 #define EVENT_DEV_ASLEEP		7
391 #define EVENT_DEV_OPEN			8
392 #define EVENT_STAT_UPDATE		9
393 #define EVENT_DEV_DISCONNECT		10
394 
/* Statistics accumulation state used by lan78xx_update_stats():
 * @saved holds the previous raw 32-bit readings, @rollover_count how
 * often each counter wrapped, @rollover_max each counter's maximum
 * value, and @curr_stat the derived 64-bit running totals.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
402 
/* State for the driver-created IRQ domain.
 * NOTE(review): presumably demultiplexes PHY/GPIO interrupts reported
 * through the USB interrupt endpoint — the handler is outside this
 * chunk; confirm there.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* mapped IRQ number for the PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* cached interrupt-enable mask */
	struct mutex		irq_lock;		/* for irq bus access */
};
411 
/* Per-device driver context shared by the netdev, USB, and PHY layers. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* struct lan78xx_priv */

	unsigned int		tx_pend_data_len;
	/* URB pool geometry, chosen per USB speed (see *_URB_NUM/SIZE) */
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	/* preallocated buffer pools (see lan78xx_alloc_buf_pool()) and
	 * in-flight / completed / pending queues
	 */
	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;	/* deferred event work (EVENT_* flags) */

	int			msg_enable;

	struct urb		*urb_intr;	/* interrupt endpoint URB */
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		phy_mutex; /* for phy access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;	/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* ID_REV chip id (7800/7850/7801) */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;	/* flow control autonegotiation */
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
474 
/* Module-wide default message level; -1 means "driver default".
 * Per-device level can still be changed at runtime via ethtool.
 */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
479 
/* Take one preallocated buffer skb from a free pool.
 *
 * Returns the dequeued skb, or NULL when the pool is empty.
 *
 * skb_dequeue() already returns NULL for an empty list and does its own
 * locking, so the previous unlocked skb_queue_empty() pre-check was
 * redundant (and inherently racy); dequeue unconditionally instead.
 */
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	return skb_dequeue(buf_pool);
}
487 
/* Return a buffer skb to its free pool, first resetting it to an empty
 * state (data/tail rewound to head, zero length) so it can be reused
 * for the next URB transfer.
 */
static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	/* rewind the data/tail pointers to the start of the buffer */
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}
499 
500 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
501 {
502 	struct skb_data *entry;
503 	struct sk_buff *buf;
504 
505 	while (!skb_queue_empty(buf_pool)) {
506 		buf = skb_dequeue(buf_pool);
507 		if (buf) {
508 			entry = (struct skb_data *)buf->cb;
509 			usb_free_urb(entry->urb);
510 			dev_kfree_skb_any(buf);
511 		}
512 	}
513 }
514 
/* Preallocate @n_urbs buffer skbs of @urb_size bytes, each paired with
 * a URB recorded in skb->cb, and queue them all on @buf_pool.
 *
 * Returns 0 on success or -ENOMEM after tearing down any partially
 * built pool.
 */
static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		/* make sure the data area is one linear buffer */
		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		/* stash per-buffer bookkeeping in the skb control block */
		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	/* free whatever was allocated before the failure */
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}
558 
/* Take an Rx buffer from the free pool; NULL when exhausted. */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}
563 
/* Reset @rx_buf and return it to the Rx free pool. */
static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}
569 
/* Free all Rx buffers and their URBs. */
static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}
574 
/* Preallocate the Rx buffer/URB pool; returns 0 or -ENOMEM. */
static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
580 
/* Take a Tx buffer from the free pool; NULL when exhausted. */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}
585 
/* Reset @tx_buf and return it to the Tx free pool. */
static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}
591 
/* Free all Tx buffers and their URBs. */
static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}
596 
/* Preallocate the Tx buffer/URB pool; returns 0 or -ENOMEM. */
static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
602 
/* Read 32-bit device register @index over the USB control pipe into
 * @data.
 *
 * The transfer buffer is heap-allocated because the USB core requires
 * DMA-capable transfer buffers; stack memory must not be used.
 *
 * Returns a negative errno on failure (-ENODEV once the device is
 * flagged disconnected), otherwise the non-negative usb_control_msg()
 * result.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* register value arrives little-endian on the wire */
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret;
}
632 
/* Write 32-bit value @data to device register @index over the USB
 * control pipe.
 *
 * As with lan78xx_read_reg(), the transfer buffer is heap-allocated
 * because USB transfer buffers must be DMA-capable.
 *
 * Returns a negative errno on failure (-ENODEV once the device is
 * flagged disconnected), otherwise the non-negative usb_control_msg()
 * result.
 */
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* device expects little-endian register values */
	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret;
}
663 
664 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
665 			      u32 data)
666 {
667 	int ret;
668 	u32 buf;
669 
670 	ret = lan78xx_read_reg(dev, reg, &buf);
671 	if (ret < 0)
672 		return ret;
673 
674 	buf &= ~mask;
675 	buf |= (mask & data);
676 
677 	return lan78xx_write_reg(dev, reg, buf);
678 }
679 
/* Fetch the complete hardware statistics block via the GET_STATS vendor
 * control request and copy it into @data, converting every 32-bit
 * counter from little-endian wire order to host order.
 *
 * Returns the usb_control_msg() result: negative errno on failure,
 * number of bytes transferred on success (callers treat > 0 as valid).
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	/* DMA-safe bounce buffer for the control transfer */
	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* treat the struct as a flat array of u32 counters */
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}
718 
/* If a freshly read 32-bit counter is smaller than the previously saved
 * reading, the hardware counter wrapped: bump its rollover count.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
724 
/* Detect 32-bit wrap-around for every hardware counter in @stats by
 * comparing against the last saved snapshot, then make @stats the new
 * baseline.  Caller holds dev->stats.access_lock (see
 * lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* this reading becomes the baseline for the next comparison */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
778 
/* Refresh dev->stats.curr_stat: read the hardware counters, record any
 * 32-bit rollovers, then fold everything into the 64-bit totals.
 * Silently returns if an autopm reference cannot be taken (device
 * suspended/going away).
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	/* view the parallel stat structures as flat counter arrays */
	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* a positive return is the number of bytes transferred */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* 64-bit total = current reading + rollovers * (counter span) */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
806 
/* Start a hardware block by setting its @hw_enable bit(s) in @reg. */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
811 
/* Stop a hardware block: clear @hw_enabled in @reg (if set), then poll
 * until the block reports @hw_disabled or HW_DISABLE_TIMEOUT elapses.
 *
 * Returns 0 when the block is stopped (or was already stopped),
 * -ETIME on timeout, or a negative errno from register access.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		/* poll for the "disabled" status bit */
		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do  {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	ret = stopped ? 0 : -ETIME;

	return ret;
}
851 
/* Flush a FIFO by setting its self-clearing @fifo_flush reset bit. */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
856 
857 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
858 {
859 	int ret;
860 
861 	netif_dbg(dev, drv, dev->net, "start tx path");
862 
863 	/* Start the MAC transmitter */
864 
865 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
866 	if (ret < 0)
867 		return ret;
868 
869 	/* Start the Tx FIFO */
870 
871 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
872 	if (ret < 0)
873 		return ret;
874 
875 	return 0;
876 }
877 
878 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
879 {
880 	int ret;
881 
882 	netif_dbg(dev, drv, dev->net, "stop tx path");
883 
884 	/* Stop the Tx FIFO */
885 
886 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
887 	if (ret < 0)
888 		return ret;
889 
890 	/* Stop the MAC transmitter */
891 
892 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
893 	if (ret < 0)
894 		return ret;
895 
896 	return 0;
897 }
898 
/* Reset (flush) the Tx FIFO.  The caller must ensure the Tx path is
 * stopped before calling lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
906 
907 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
908 {
909 	int ret;
910 
911 	netif_dbg(dev, drv, dev->net, "start rx path");
912 
913 	/* Start the Rx FIFO */
914 
915 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
916 	if (ret < 0)
917 		return ret;
918 
919 	/* Start the MAC receiver*/
920 
921 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
922 	if (ret < 0)
923 		return ret;
924 
925 	return 0;
926 }
927 
928 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
929 {
930 	int ret;
931 
932 	netif_dbg(dev, drv, dev->net, "stop rx path");
933 
934 	/* Stop the MAC receiver */
935 
936 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
937 	if (ret < 0)
938 		return ret;
939 
940 	/* Stop the Rx FIFO */
941 
942 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
943 	if (ret < 0)
944 		return ret;
945 
946 	return 0;
947 }
948 
/* Reset (flush) the Rx FIFO.  The caller must ensure the Rx path is
 * stopped before calling lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
956 
/* Poll MII_ACC until the MII controller clears its busy bit, giving up
 * after one second.  Must be called with phy_mutex held.
 * Returns 0 when idle, -ETIMEDOUT on timeout, or a register-access errno.
 */
static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (ret < 0)
			return ret;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -ETIMEDOUT;
}
975 
976 static inline u32 mii_access(int id, int index, int read)
977 {
978 	u32 ret;
979 
980 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
981 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
982 	if (read)
983 		ret |= MII_ACC_MII_READ_;
984 	else
985 		ret |= MII_ACC_MII_WRITE_;
986 	ret |= MII_ACC_MII_BUSY_;
987 
988 	return ret;
989 }
990 
/* Wait for the current EEPROM command to complete: poll E2P_CMD until
 * the busy bit clears, the controller flags EPC_TIMEOUT, or one second
 * elapses.  Used after both read and write commands despite the "read"
 * wording in the warning message.
 * Returns 0 on completion, -ETIMEDOUT otherwise, or a register errno.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	/* still busy, or the controller itself reported a timeout */
	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -ETIMEDOUT;
	}

	return 0;
}
1015 
/* Confirm the EEPROM controller is idle before issuing a new command:
 * poll E2P_CMD's busy bit for up to one second.
 * Returns 0 when idle, -ETIMEDOUT when still busy, or a register errno.
 */
static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -ETIMEDOUT;
}
1036 
/* Read @length bytes from the EEPROM starting at @offset into @data,
 * one byte per E2P command.
 *
 * On LAN7800 the EEPROM pins are muxed with the LED function, so LED
 * outputs are disabled in HW_CFG for the duration and restored at the
 * end.  On a plain USB/register failure the function returns at once
 * without restoring HW_CFG — there is nothing useful to do if the
 * device has stopped responding.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* issue a single-byte EEPROM read at the current offset */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		/* only the low byte of E2P_DATA is meaningful */
		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	/* restore the LED configuration saved at entry */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}
1094 
1095 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
1096 			       u32 length, u8 *data)
1097 {
1098 	int ret;
1099 	u8 sig;
1100 
1101 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
1102 	if (ret < 0)
1103 		return ret;
1104 
1105 	if (sig != EEPROM_INDICATOR)
1106 		return -ENODATA;
1107 
1108 	return lan78xx_read_raw_eeprom(dev, offset, length, data);
1109 }
1110 
1111 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
1112 				    u32 length, u8 *data)
1113 {
1114 	u32 val;
1115 	u32 saved;
1116 	int i, ret;
1117 
1118 	/* depends on chip, some EEPROM pins are muxed with LED function.
1119 	 * disable & restore LED function to access EEPROM.
1120 	 */
1121 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
1122 	if (ret < 0)
1123 		return ret;
1124 
1125 	saved = val;
1126 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
1127 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
1128 		ret = lan78xx_write_reg(dev, HW_CFG, val);
1129 		if (ret < 0)
1130 			return ret;
1131 	}
1132 
1133 	ret = lan78xx_eeprom_confirm_not_busy(dev);
1134 	/* Looks like not USB specific error, try to recover */
1135 	if (ret == -ETIMEDOUT)
1136 		goto write_raw_eeprom_done;
1137 	/* If USB fails, there is nothing to do */
1138 	if (ret < 0)
1139 		return ret;
1140 
1141 	/* Issue write/erase enable command */
1142 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
1143 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
1144 	if (ret < 0)
1145 		return ret;
1146 
1147 	ret = lan78xx_wait_eeprom(dev);
1148 	/* Looks like not USB specific error, try to recover */
1149 	if (ret == -ETIMEDOUT)
1150 		goto write_raw_eeprom_done;
1151 	/* If USB fails, there is nothing to do */
1152 	if (ret < 0)
1153 		return ret;
1154 
1155 	for (i = 0; i < length; i++) {
1156 		/* Fill data register */
1157 		val = data[i];
1158 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
1159 		if (ret < 0)
1160 			return ret;
1161 
1162 		/* Send "write" command */
1163 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
1164 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1165 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
1166 		if (ret < 0)
1167 			return ret;
1168 
1169 		ret = lan78xx_wait_eeprom(dev);
1170 		/* Looks like not USB specific error, try to recover */
1171 		if (ret == -ETIMEDOUT)
1172 			goto write_raw_eeprom_done;
1173 		/* If USB fails, there is nothing to do */
1174 		if (ret < 0)
1175 			return ret;
1176 
1177 		offset++;
1178 	}
1179 
1180 write_raw_eeprom_done:
1181 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
1182 		return lan78xx_write_reg(dev, HW_CFG, saved);
1183 
1184 	return 0;
1185 }
1186 
/* lan78xx_read_raw_otp - read @length bytes from the OTP array at @offset
 * into @data.
 *
 * The OTP block must be powered up first: if OTP_PWR_DN reports the array
 * powered down, the bit is cleared and polled (up to ~1s) until it takes
 * effect. Each byte is then read by programming the split address
 * registers (OTP_ADDR1 holds address bits above 8, OTP_ADDR2 the low
 * bits), issuing a READ function command, kicking OTP_CMD_GO and polling
 * OTP_STATUS until the busy bit clears.
 *
 * Returns 0 on success, -ETIMEDOUT if a poll loop expires, or a negative
 * error code from the underlying register access.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* Byte address is split across two registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		/* Wait for the read command to complete */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1261 
/* lan78xx_write_raw_otp - program @length bytes from @data into the OTP
 * array at @offset.
 *
 * Mirrors lan78xx_read_raw_otp(): power up the OTP block if necessary,
 * select BYTE program mode, then for each byte load the split address
 * registers and the data register, trigger a program/verify command via
 * OTP_CMD_GO and poll OTP_STATUS until the busy bit clears.
 *
 * NOTE(review): OTP is one-time programmable — bits can only be set, not
 * cleared; callers are expected to know what they are writing.
 *
 * Returns 0 on success, -ETIMEDOUT if a poll loop expires, or a negative
 * error code from the underlying register access.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Byte address is split across two registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		/* Wait for the program/verify command to complete */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1340 
1341 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1342 			    u32 length, u8 *data)
1343 {
1344 	u8 sig;
1345 	int ret;
1346 
1347 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1348 
1349 	if (ret == 0) {
1350 		if (sig == OTP_INDICATOR_2)
1351 			offset += 0x100;
1352 		else if (sig != OTP_INDICATOR_1)
1353 			ret = -EINVAL;
1354 		if (!ret)
1355 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1356 	}
1357 
1358 	return ret;
1359 }
1360 
1361 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1362 {
1363 	int i, ret;
1364 
1365 	for (i = 0; i < 100; i++) {
1366 		u32 dp_sel;
1367 
1368 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1369 		if (unlikely(ret < 0))
1370 			return ret;
1371 
1372 		if (dp_sel & DP_SEL_DPRDY_)
1373 			return 0;
1374 
1375 		usleep_range(40, 100);
1376 	}
1377 
1378 	netdev_warn(dev->net, "%s timed out", __func__);
1379 
1380 	return -ETIMEDOUT;
1381 }
1382 
/* lan78xx_dataport_write - write @length 32-bit words from @buf into the
 * internal RAM selected by @ram_select, starting at word address @addr.
 *
 * Takes a USB autopm reference and the dataport mutex for the duration of
 * the transfer; each word is written via the DP_ADDR/DP_DATA/DP_CMD
 * registers, waiting for the data port to go idle before and after each
 * command.
 *
 * Returns 0 on success or a negative error code (also logged).
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	/* Device must be resumed before touching registers */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	/* Select which internal RAM the data port addresses */
	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		/* Each word write must complete before the next command */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1430 
1431 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1432 				    int index, u8 addr[ETH_ALEN])
1433 {
1434 	u32 temp;
1435 
1436 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1437 		temp = addr[3];
1438 		temp = addr[2] | (temp << 8);
1439 		temp = addr[1] | (temp << 8);
1440 		temp = addr[0] | (temp << 8);
1441 		pdata->pfilter_table[index][1] = temp;
1442 		temp = addr[5];
1443 		temp = addr[4] | (temp << 8);
1444 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1445 		pdata->pfilter_table[index][0] = temp;
1446 	}
1447 }
1448 
1449 /* returns hash bit number for given MAC address */
1450 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1451 {
1452 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1453 }
1454 
1455 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1456 {
1457 	struct lan78xx_priv *pdata =
1458 			container_of(param, struct lan78xx_priv, set_multicast);
1459 	struct lan78xx_net *dev = pdata->dev;
1460 	int i, ret;
1461 
1462 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1463 		  pdata->rfe_ctl);
1464 
1465 	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
1466 				     DP_SEL_VHF_VLAN_LEN,
1467 				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1468 	if (ret < 0)
1469 		goto multicast_write_done;
1470 
1471 	for (i = 1; i < NUM_OF_MAF; i++) {
1472 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1473 		if (ret < 0)
1474 			goto multicast_write_done;
1475 
1476 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1477 					pdata->pfilter_table[i][1]);
1478 		if (ret < 0)
1479 			goto multicast_write_done;
1480 
1481 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1482 					pdata->pfilter_table[i][0]);
1483 		if (ret < 0)
1484 			goto multicast_write_done;
1485 	}
1486 
1487 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1488 
1489 multicast_write_done:
1490 	if (ret < 0)
1491 		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
1492 	return;
1493 }
1494 
1495 static void lan78xx_set_multicast(struct net_device *netdev)
1496 {
1497 	struct lan78xx_net *dev = netdev_priv(netdev);
1498 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1499 	unsigned long flags;
1500 	int i;
1501 
1502 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1503 
1504 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1505 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1506 
1507 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1508 		pdata->mchash_table[i] = 0;
1509 
1510 	/* pfilter_table[0] has own HW address */
1511 	for (i = 1; i < NUM_OF_MAF; i++) {
1512 		pdata->pfilter_table[i][0] = 0;
1513 		pdata->pfilter_table[i][1] = 0;
1514 	}
1515 
1516 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1517 
1518 	if (dev->net->flags & IFF_PROMISC) {
1519 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1520 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1521 	} else {
1522 		if (dev->net->flags & IFF_ALLMULTI) {
1523 			netif_dbg(dev, drv, dev->net,
1524 				  "receive all multicast enabled");
1525 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1526 		}
1527 	}
1528 
1529 	if (netdev_mc_count(dev->net)) {
1530 		struct netdev_hw_addr *ha;
1531 		int i;
1532 
1533 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1534 
1535 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1536 
1537 		i = 1;
1538 		netdev_for_each_mc_addr(ha, netdev) {
1539 			/* set first 32 into Perfect Filter */
1540 			if (i < 33) {
1541 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1542 			} else {
1543 				u32 bitnum = lan78xx_hash(ha->addr);
1544 
1545 				pdata->mchash_table[bitnum / 32] |=
1546 							(1 << (bitnum % 32));
1547 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1548 			}
1549 			i++;
1550 		}
1551 	}
1552 
1553 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1554 
1555 	/* defer register writes to a sleepable context */
1556 	schedule_work(&pdata->set_multicast);
1557 }
1558 
1559 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1560 				      u16 lcladv, u16 rmtadv)
1561 {
1562 	u32 flow = 0, fct_flow = 0;
1563 	u8 cap;
1564 
1565 	if (dev->fc_autoneg)
1566 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1567 	else
1568 		cap = dev->fc_request_control;
1569 
1570 	if (cap & FLOW_CTRL_TX)
1571 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1572 
1573 	if (cap & FLOW_CTRL_RX)
1574 		flow |= FLOW_CR_RX_FCEN_;
1575 
1576 	if (dev->udev->speed == USB_SPEED_SUPER)
1577 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1578 	else if (dev->udev->speed == USB_SPEED_HIGH)
1579 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1580 
1581 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1582 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1583 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1584 
1585 	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1586 
1587 	/* threshold value should be set before enabling flow */
1588 	lan78xx_write_reg(dev, FLOW, flow);
1589 
1590 	return 0;
1591 }
1592 
1593 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1594 
/* lan78xx_mac_reset - reset the MAC block and wait for completion.
 *
 * Holds phy_mutex so no MDIO transaction can run concurrently, and first
 * waits for any in-flight MDIO access to finish — resetting while the
 * MDIO bus is active can lock up the MAC interface. After setting
 * MAC_CR_RST_, polls MAC_CR for up to one second until the self-clearing
 * reset bit drops.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset bit never clears, or a
 * negative error code from the register accesses.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}
1640 
/* lan78xx_link_reset - react to a PHY link-state change.
 *
 * Called from the deferred-work path after a PHY interrupt. Clears the
 * device's PHY interrupt status, samples the current link state, then:
 *  - on link down: resets the MAC and stops the statistics timer;
 *  - on link up: tunes USB3 U1/U2 low-power states for the negotiated
 *    speed (U2 hurts gigabit throughput, so it is disabled at 1000M),
 *    reprograms flow control from the autoneg results, restarts the
 *    stats timer, resubmits the Rx URBs and kicks NAPI.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	/* Sample link state under the PHY lock for a consistent view */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		/* Fetch both sides' advertisements for pause resolution */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		/* napi_schedule() needs BH disabled in process context */
		local_bh_disable();
		napi_schedule(&dev->napi);
		local_bh_enable();
	}

	return 0;
}
1735 
/* Some work can't be done in atomic (tasklet/interrupt) context, so it
 * is deferred to the kernel workqueue.
 *
 * NOTE: annoying asymmetry: schedule_work() fails if the work item is
 * already queued, but tasklet_schedule() doesn't. We hope such failures
 * are rare.
 */
1741 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1742 {
1743 	set_bit(work, &dev->flags);
1744 	if (!schedule_delayed_work(&dev->wq, 0))
1745 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1746 }
1747 
1748 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1749 {
1750 	u32 intdata;
1751 
1752 	if (urb->actual_length != 4) {
1753 		netdev_warn(dev->net,
1754 			    "unexpected urb length %d", urb->actual_length);
1755 		return;
1756 	}
1757 
1758 	intdata = get_unaligned_le32(urb->transfer_buffer);
1759 
1760 	if (intdata & INT_ENP_PHY_INT) {
1761 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1762 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1763 
1764 		if (dev->domain_data.phyirq > 0)
1765 			generic_handle_irq_safe(dev->domain_data.phyirq);
1766 	} else {
1767 		netdev_warn(dev->net,
1768 			    "unexpected interrupt: 0x%08x\n", intdata);
1769 	}
1770 }
1771 
/* ethtool get_eeprom_len: report the maximum EEPROM size supported. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1776 
/* ethtool get_eeprom: read raw EEPROM content into @data.
 *
 * Resumes the device via USB autopm for the duration of the access.
 * Note this uses the raw accessor, so the content is returned even if
 * no valid EEPROM indicator is present.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1795 
1796 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1797 				      struct ethtool_eeprom *ee, u8 *data)
1798 {
1799 	struct lan78xx_net *dev = netdev_priv(netdev);
1800 	int ret;
1801 
1802 	ret = usb_autopm_get_interface(dev->intf);
1803 	if (ret)
1804 		return ret;
1805 
1806 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1807 	 * to load data from EEPROM
1808 	 */
1809 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1810 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1811 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1812 		 (ee->offset == 0) &&
1813 		 (ee->len == 512) &&
1814 		 (data[0] == OTP_INDICATOR_1))
1815 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1816 
1817 	usb_autopm_put_interface(dev->intf);
1818 
1819 	return ret;
1820 }
1821 
1822 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1823 				u8 *data)
1824 {
1825 	if (stringset == ETH_SS_STATS)
1826 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1827 }
1828 
1829 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1830 {
1831 	if (sset == ETH_SS_STATS)
1832 		return ARRAY_SIZE(lan78xx_gstrings);
1833 	else
1834 		return -EOPNOTSUPP;
1835 }
1836 
/* ethtool get_ethtool_stats: refresh hardware counters, then copy the
 * current snapshot to userspace under the stats lock so a concurrent
 * update cannot tear the copy.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1848 
/* ethtool get_wol: report Wake-on-LAN capability and current settings.
 *
 * WoL is advertised only when USB_CFG0 shows remote wakeup supported;
 * the active options come from the driver's shadow (pdata->wol). On a
 * register read failure, or if autopm resume fails, WoL is reported as
 * unsupported/disabled (the callback is void, so errors cannot be
 * propagated).
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1876 
/* ethtool set_wol: record the requested Wake-on-LAN options.
 *
 * The options are stored in the driver shadow (applied to hardware at
 * suspend time), the USB device is flagged wakeup-capable accordingly,
 * and the request is forwarded to the PHY.
 *
 * NOTE(review): the return value of phy_ethtool_set_wol() is ignored —
 * presumably deliberate best-effort (MAC WoL can work without PHY WoL
 * support), but worth confirming.
 */
static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1901 
1902 static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
1903 {
1904 	struct lan78xx_net *dev = netdev_priv(net);
1905 	struct phy_device *phydev = net->phydev;
1906 	int ret;
1907 	u32 buf;
1908 
1909 	ret = usb_autopm_get_interface(dev->intf);
1910 	if (ret < 0)
1911 		return ret;
1912 
1913 	ret = phy_ethtool_get_eee(phydev, edata);
1914 	if (ret < 0)
1915 		goto exit;
1916 
1917 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1918 	if (buf & MAC_CR_EEE_EN_) {
1919 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1920 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1921 		edata->tx_lpi_timer = buf;
1922 	} else {
1923 		edata->tx_lpi_timer = 0;
1924 	}
1925 
1926 	ret = 0;
1927 exit:
1928 	usb_autopm_put_interface(dev->intf);
1929 
1930 	return ret;
1931 }
1932 
1933 static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
1934 {
1935 	struct lan78xx_net *dev = netdev_priv(net);
1936 	int ret;
1937 	u32 buf;
1938 
1939 	ret = usb_autopm_get_interface(dev->intf);
1940 	if (ret < 0)
1941 		return ret;
1942 
1943 	ret = phy_ethtool_set_eee(net->phydev, edata);
1944 	if (ret < 0)
1945 		goto out;
1946 
1947 	buf = (u32)edata->tx_lpi_timer;
1948 	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1949 out:
1950 	usb_autopm_put_interface(dev->intf);
1951 
1952 	return ret;
1953 }
1954 
1955 static u32 lan78xx_get_link(struct net_device *net)
1956 {
1957 	u32 link;
1958 
1959 	mutex_lock(&net->phydev->lock);
1960 	phy_read_status(net->phydev);
1961 	link = net->phydev->link;
1962 	mutex_unlock(&net->phydev->lock);
1963 
1964 	return link;
1965 }
1966 
/* ethtool get_drvinfo: fill in the driver name and the USB bus path of
 * the device.
 */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1975 
/* ethtool get_msglevel: return the current netif message-enable mask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1982 
/* ethtool set_msglevel: set the netif message-enable mask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1989 
/* ethtool get_link_ksettings: delegate to phylib, holding a USB autopm
 * reference while the PHY is queried. Returns 0 on success or the
 * autopm error.
 */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
2007 
/* ethtool set_link_ksettings: apply speed/duplex/autoneg via phylib.
 *
 * When autonegotiation is disabled, the link is deliberately bounced by
 * briefly putting the PHY into loopback (BMCR_LOOPBACK drops the link)
 * and restoring BMCR, so the link partner re-trains with the forced
 * settings.
 *
 * NOTE(review): the phy_read()/phy_write() return values in the forced
 * path are not checked — a failed BMCR read would write back a negative
 * errno value; presumably acceptable here, but worth confirming.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
2035 
2036 static void lan78xx_get_pause(struct net_device *net,
2037 			      struct ethtool_pauseparam *pause)
2038 {
2039 	struct lan78xx_net *dev = netdev_priv(net);
2040 	struct phy_device *phydev = net->phydev;
2041 	struct ethtool_link_ksettings ecmd;
2042 
2043 	phy_ethtool_ksettings_get(phydev, &ecmd);
2044 
2045 	pause->autoneg = dev->fc_autoneg;
2046 
2047 	if (dev->fc_request_control & FLOW_CTRL_TX)
2048 		pause->tx_pause = 1;
2049 
2050 	if (dev->fc_request_control & FLOW_CTRL_RX)
2051 		pause->rx_pause = 1;
2052 }
2053 
/* ethtool set_pauseparam: update the requested pause configuration.
 *
 * Pause autonegotiation can only be requested when link autonegotiation
 * is enabled (-EINVAL otherwise). The requested rx/tx pause is stored in
 * fc_request_control; when autoneg is active, the Pause/Asym_Pause
 * advertisement bits are rewritten accordingly and pushed to the PHY so
 * the link renegotiates.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		/* Rebuild the pause advertisement bits from scratch */
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
2098 
2099 static int lan78xx_get_regs_len(struct net_device *netdev)
2100 {
2101 	if (!netdev->phydev)
2102 		return (sizeof(lan78xx_regs));
2103 	else
2104 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
2105 }
2106 
/* ethtool get_regs: dump the MAC registers listed in lan78xx_regs[],
 * followed by PHY registers 0-31 when a PHY is attached.
 *
 * NOTE(review): lan78xx_read_reg()/phy_read() errors are ignored here;
 * on failure the corresponding slots may hold stale or error values.
 * The ethtool callback is void, so there is no way to report them.
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}
2126 
/* ethtool operations supported by the lan78xx driver */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
2151 
/* lan78xx_init_mac_address - determine and program the device MAC address.
 *
 * The address already present in RX_ADDRL/RX_ADDRH (e.g. set by firmware)
 * is used if valid. Otherwise the fallback order is: platform/device-tree
 * address, then EEPROM/OTP at EEPROM_MAC_OFFSET, and finally a random
 * locally-administered address. The result is written back to the RX
 * address registers (when changed), installed as perfect-filter entry 0
 * (MAF_HI/MAF_LO with the valid flag), and published to the net_device.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];
	int ret;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
	if (ret < 0)
		return ret;

	/* Registers hold the address little-endian: low word = bytes 0-3 */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		if (ret < 0)
			return ret;
	}

	/* Install the address as perfect-filter entry 0 */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(dev->net, addr);

	return 0;
}
2218 
2219 /* MDIO read and write wrappers for phylib */
/* MDIO read wrapper for phylib: read register @idx of PHY @phy_id via
 * the MII_ACC/MII_DATA registers.
 *
 * Takes a USB autopm reference and phy_mutex for the transaction; waits
 * for the MII interface to be idle before and after issuing the access.
 *
 * Returns the 16-bit register value on success or a negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);
	if (ret < 0)
		goto done;

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
2259 
2260 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2261 				 u16 regval)
2262 {
2263 	struct lan78xx_net *dev = bus->priv;
2264 	u32 val, addr;
2265 	int ret;
2266 
2267 	ret = usb_autopm_get_interface(dev->intf);
2268 	if (ret < 0)
2269 		return ret;
2270 
2271 	mutex_lock(&dev->phy_mutex);
2272 
2273 	/* confirm MII not busy */
2274 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2275 	if (ret < 0)
2276 		goto done;
2277 
2278 	val = (u32)regval;
2279 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2280 	if (ret < 0)
2281 		goto done;
2282 
2283 	/* set the address, index & direction (write to PHY) */
2284 	addr = mii_access(phy_id, idx, MII_WRITE);
2285 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2286 	if (ret < 0)
2287 		goto done;
2288 
2289 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2290 	if (ret < 0)
2291 		goto done;
2292 
2293 done:
2294 	mutex_unlock(&dev->phy_mutex);
2295 	usb_autopm_put_interface(dev->intf);
2296 	return ret;
2297 }
2298 
2299 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2300 {
2301 	struct device_node *node;
2302 	int ret;
2303 
2304 	dev->mdiobus = mdiobus_alloc();
2305 	if (!dev->mdiobus) {
2306 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2307 		return -ENOMEM;
2308 	}
2309 
2310 	dev->mdiobus->priv = (void *)dev;
2311 	dev->mdiobus->read = lan78xx_mdiobus_read;
2312 	dev->mdiobus->write = lan78xx_mdiobus_write;
2313 	dev->mdiobus->name = "lan78xx-mdiobus";
2314 	dev->mdiobus->parent = &dev->udev->dev;
2315 
2316 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2317 		 dev->udev->bus->busnum, dev->udev->devnum);
2318 
2319 	switch (dev->chipid) {
2320 	case ID_REV_CHIP_ID_7800_:
2321 	case ID_REV_CHIP_ID_7850_:
2322 		/* set to internal PHY id */
2323 		dev->mdiobus->phy_mask = ~(1 << 1);
2324 		break;
2325 	case ID_REV_CHIP_ID_7801_:
2326 		/* scan thru PHYAD[2..0] */
2327 		dev->mdiobus->phy_mask = ~(0xFF);
2328 		break;
2329 	}
2330 
2331 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2332 	ret = of_mdiobus_register(dev->mdiobus, node);
2333 	of_node_put(node);
2334 	if (ret) {
2335 		netdev_err(dev->net, "can't register MDIO bus\n");
2336 		goto exit1;
2337 	}
2338 
2339 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2340 	return 0;
2341 exit1:
2342 	mdiobus_free(dev->mdiobus);
2343 	return ret;
2344 }
2345 
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2351 
2352 static void lan78xx_link_status_change(struct net_device *net)
2353 {
2354 	struct lan78xx_net *dev = netdev_priv(net);
2355 	struct phy_device *phydev = net->phydev;
2356 	u32 data;
2357 	int ret;
2358 
2359 	ret = lan78xx_read_reg(dev, MAC_CR, &data);
2360 	if (ret < 0)
2361 		return;
2362 
2363 	if (phydev->enable_tx_lpi)
2364 		data |=  MAC_CR_EEE_EN_;
2365 	else
2366 		data &= ~MAC_CR_EEE_EN_;
2367 	lan78xx_write_reg(dev, MAC_CR, data);
2368 
2369 	phy_print_status(phydev);
2370 }
2371 
/* irq_domain .map callback: bind a freshly created linux irq to our
 * irqchip/handler and stash the per-device irq data on it.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
2383 
/* irq_domain .unmap callback: undo everything irq_map() set up. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2389 
/* Simple 1:1 hwirq-to-linux-irq domain ops for the device's interrupt EP. */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2394 
/* Clear the cached enable bit; hardware is updated later in
 * lan78xx_irq_bus_sync_unlock().
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
2401 
/* Set the cached enable bit; hardware is updated later in
 * lan78xx_irq_bus_sync_unlock().
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
2408 
/* Begin a slow-bus irqchip transaction; released by
 * lan78xx_irq_bus_sync_unlock() after the USB register update.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2415 
2416 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2417 {
2418 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2419 	struct lan78xx_net *dev =
2420 			container_of(data, struct lan78xx_net, domain_data);
2421 	u32 buf;
2422 	int ret;
2423 
2424 	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
2425 	 * are only two callbacks executed in non-atomic contex.
2426 	 */
2427 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2428 	if (ret < 0)
2429 		goto irq_bus_sync_unlock;
2430 
2431 	if (buf != data->irqenable)
2432 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2433 
2434 irq_bus_sync_unlock:
2435 	if (ret < 0)
2436 		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
2437 			   ERR_PTR(ret));
2438 
2439 	mutex_unlock(&data->irq_lock);
2440 }
2441 
2442 static struct irq_chip lan78xx_irqchip = {
2443 	.name			= "lan78xx-irqs",
2444 	.irq_mask		= lan78xx_irq_mask,
2445 	.irq_unmask		= lan78xx_irq_unmask,
2446 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2447 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2448 };
2449 
/* Create an irq domain for the device's interrupt endpoint and map the
 * PHY interrupt (INT_EP_PHY) into it. On success dev->domain_data.phyirq
 * holds the linux irq number; on failure both irqdomain and phyirq are
 * left cleared (irqdomain = NULL, phyirq = 0).
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable mask from the current hardware state */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		return ret;

	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2491 
/* Dispose of the PHY irq mapping and the domain created by
 * lan78xx_setup_irq_domain(); safe to call if setup failed (phyirq == 0).
 */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
2503 
2504 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2505 {
2506 	u32 buf;
2507 	int ret;
2508 	struct fixed_phy_status fphy_status = {
2509 		.link = 1,
2510 		.speed = SPEED_1000,
2511 		.duplex = DUPLEX_FULL,
2512 	};
2513 	struct phy_device *phydev;
2514 
2515 	phydev = phy_find_first(dev->mdiobus);
2516 	if (!phydev) {
2517 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2518 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2519 		if (IS_ERR(phydev)) {
2520 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2521 			return NULL;
2522 		}
2523 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2524 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2525 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2526 					MAC_RGMII_ID_TXC_DELAY_EN_);
2527 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2528 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2529 		buf |= HW_CFG_CLK125_EN_;
2530 		buf |= HW_CFG_REFCLK25_EN_;
2531 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2532 	} else {
2533 		if (!phydev->drv) {
2534 			netdev_err(dev->net, "no PHY driver found\n");
2535 			return NULL;
2536 		}
2537 		dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2538 		/* The PHY driver is responsible to configure proper RGMII
2539 		 * interface delays. Disable RGMII delays on MAC side.
2540 		 */
2541 		lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2542 
2543 		phydev->is_internal = false;
2544 	}
2545 	return phydev;
2546 }
2547 
/* Locate (or, for LAN7801, possibly create) the PHY for this chip,
 * connect it to the net device with lan78xx_link_status_change() as the
 * link callback, and set up advertisement (pause frames, EEE) and LEDs.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		/* LAN7801 uses an external (or fixed) PHY over RGMII */
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* LAN7800/7850 have an integrated PHY */
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo the fixed PHY registered by lan7801_phy_init() */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
				phy_device_free(phydev);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	phy_support_eee(phydev);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		/* enable one LED output per entry in the DT property */
		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2648 
2649 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2650 {
2651 	bool rxenabled;
2652 	u32 buf;
2653 	int ret;
2654 
2655 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2656 	if (ret < 0)
2657 		return ret;
2658 
2659 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2660 
2661 	if (rxenabled) {
2662 		buf &= ~MAC_RX_RXEN_;
2663 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2664 		if (ret < 0)
2665 			return ret;
2666 	}
2667 
2668 	/* add 4 to size for FCS */
2669 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2670 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2671 
2672 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2673 	if (ret < 0)
2674 		return ret;
2675 
2676 	if (rxenabled) {
2677 		buf |= MAC_RX_RXEN_;
2678 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2679 		if (ret < 0)
2680 			return ret;
2681 	}
2682 
2683 	return 0;
2684 }
2685 
/* Asynchronously unlink every URB on queue @q that is not already being
 * unlinked. Returns the number of unlinks successfully started. The
 * queue lock is dropped around each usb_unlink_urb() call, so the walk
 * is restarted from the head each time.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry that hasn't been unlinked yet */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2730 
/* ndo_change_mtu: update the hardware Rx frame-length limit and, on
 * success, the netdev MTU. Returns 0 or a negative error code.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((max_frame_len % dev->maxpacket) == 0)
		return -EDOM;

	/* resume the device for the register write below */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
	if (ret < 0)
		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
			   new_mtu, netdev->mtu, ERR_PTR(ret));
	else
		WRITE_ONCE(netdev->mtu, new_mtu);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
2756 
2757 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2758 {
2759 	struct lan78xx_net *dev = netdev_priv(netdev);
2760 	struct sockaddr *addr = p;
2761 	u32 addr_lo, addr_hi;
2762 	int ret;
2763 
2764 	if (netif_running(netdev))
2765 		return -EBUSY;
2766 
2767 	if (!is_valid_ether_addr(addr->sa_data))
2768 		return -EADDRNOTAVAIL;
2769 
2770 	eth_hw_addr_set(netdev, addr->sa_data);
2771 
2772 	addr_lo = netdev->dev_addr[0] |
2773 		  netdev->dev_addr[1] << 8 |
2774 		  netdev->dev_addr[2] << 16 |
2775 		  netdev->dev_addr[3] << 24;
2776 	addr_hi = netdev->dev_addr[4] |
2777 		  netdev->dev_addr[5] << 8;
2778 
2779 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2780 	if (ret < 0)
2781 		return ret;
2782 
2783 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2784 	if (ret < 0)
2785 		return ret;
2786 
2787 	/* Added to support MAC address changes */
2788 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2789 	if (ret < 0)
2790 		return ret;
2791 
2792 	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2793 }
2794 
2795 /* Enable or disable Rx checksum offload engine */
2796 static int lan78xx_set_features(struct net_device *netdev,
2797 				netdev_features_t features)
2798 {
2799 	struct lan78xx_net *dev = netdev_priv(netdev);
2800 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2801 	unsigned long flags;
2802 
2803 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2804 
2805 	if (features & NETIF_F_RXCSUM) {
2806 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2807 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2808 	} else {
2809 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2810 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2811 	}
2812 
2813 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2814 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2815 	else
2816 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2817 
2818 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2819 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2820 	else
2821 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2822 
2823 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2824 
2825 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2826 }
2827 
/* Worker that pushes the cached VLAN filter table to the device's
 * dataport; scheduled from the (atomic) add/kill vid callbacks.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2837 
2838 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2839 				   __be16 proto, u16 vid)
2840 {
2841 	struct lan78xx_net *dev = netdev_priv(netdev);
2842 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2843 	u16 vid_bit_index;
2844 	u16 vid_dword_index;
2845 
2846 	vid_dword_index = (vid >> 5) & 0x7F;
2847 	vid_bit_index = vid & 0x1F;
2848 
2849 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2850 
2851 	/* defer register writes to a sleepable context */
2852 	schedule_work(&pdata->set_vlan);
2853 
2854 	return 0;
2855 }
2856 
2857 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2858 				    __be16 proto, u16 vid)
2859 {
2860 	struct lan78xx_net *dev = netdev_priv(netdev);
2861 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2862 	u16 vid_bit_index;
2863 	u16 vid_dword_index;
2864 
2865 	vid_dword_index = (vid >> 5) & 0x7F;
2866 	vid_bit_index = vid & 0x1F;
2867 
2868 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2869 
2870 	/* defer register writes to a sleepable context */
2871 	schedule_work(&pdata->set_vlan);
2872 
2873 	return 0;
2874 }
2875 
2876 static int lan78xx_init_ltm(struct lan78xx_net *dev)
2877 {
2878 	u32 regs[6] = { 0 };
2879 	int ret;
2880 	u32 buf;
2881 
2882 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2883 	if (ret < 0)
2884 		goto init_ltm_failed;
2885 
2886 	if (buf & USB_CFG1_LTM_ENABLE_) {
2887 		u8 temp[2];
2888 		/* Get values from EEPROM first */
2889 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2890 			if (temp[0] == 24) {
2891 				ret = lan78xx_read_raw_eeprom(dev,
2892 							      temp[1] * 2,
2893 							      24,
2894 							      (u8 *)regs);
2895 				if (ret < 0)
2896 					return ret;
2897 			}
2898 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2899 			if (temp[0] == 24) {
2900 				ret = lan78xx_read_raw_otp(dev,
2901 							   temp[1] * 2,
2902 							   24,
2903 							   (u8 *)regs);
2904 				if (ret < 0)
2905 					return ret;
2906 			}
2907 		}
2908 	}
2909 
2910 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2911 	if (ret < 0)
2912 		goto init_ltm_failed;
2913 
2914 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2915 	if (ret < 0)
2916 		goto init_ltm_failed;
2917 
2918 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2919 	if (ret < 0)
2920 		goto init_ltm_failed;
2921 
2922 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2923 	if (ret < 0)
2924 		goto init_ltm_failed;
2925 
2926 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2927 	if (ret < 0)
2928 		goto init_ltm_failed;
2929 
2930 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2931 	if (ret < 0)
2932 		goto init_ltm_failed;
2933 
2934 	return 0;
2935 
2936 init_ltm_failed:
2937 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
2938 	return ret;
2939 }
2940 
2941 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2942 {
2943 	int result = 0;
2944 
2945 	switch (dev->udev->speed) {
2946 	case USB_SPEED_SUPER:
2947 		dev->rx_urb_size = RX_SS_URB_SIZE;
2948 		dev->tx_urb_size = TX_SS_URB_SIZE;
2949 		dev->n_rx_urbs = RX_SS_URB_NUM;
2950 		dev->n_tx_urbs = TX_SS_URB_NUM;
2951 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2952 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2953 		break;
2954 	case USB_SPEED_HIGH:
2955 		dev->rx_urb_size = RX_HS_URB_SIZE;
2956 		dev->tx_urb_size = TX_HS_URB_SIZE;
2957 		dev->n_rx_urbs = RX_HS_URB_NUM;
2958 		dev->n_tx_urbs = TX_HS_URB_NUM;
2959 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2960 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2961 		break;
2962 	case USB_SPEED_FULL:
2963 		dev->rx_urb_size = RX_FS_URB_SIZE;
2964 		dev->tx_urb_size = TX_FS_URB_SIZE;
2965 		dev->n_rx_urbs = RX_FS_URB_NUM;
2966 		dev->n_tx_urbs = TX_FS_URB_NUM;
2967 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2968 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2969 		break;
2970 	default:
2971 		netdev_warn(dev->net, "USB bus speed not supported\n");
2972 		result = -EIO;
2973 		break;
2974 	}
2975 
2976 	return result;
2977 }
2978 
/* Perform a Lite Reset and bring the MAC, FIFOs, filters and PHY back to
 * a known initial configuration. Returns 0 on success or a negative
 * error code.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	/* issue a Lite Reset and wait (up to 1s) for it to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	/* reprogram the MAC address after the reset */
	ret = lan78xx_init_mac_address(dev);
	if (ret < 0)
		return ret;

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	ret = lan78xx_init_ltm(dev);
	if (ret < 0)
		return ret;

	/* burst cap / bulk-in delay were chosen per USB speed in
	 * lan78xx_urb_config_init()
	 */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	/* enable multiple ethernet frames per USB transfer (MEF) and the
	 * 125MHz/25MHz clock outputs
	 */
	buf |= HW_CFG_MEF_;
	buf |= HW_CFG_CLK125_EN_;
	buf |= HW_CFG_REFCLK25_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	/* enable burst cap (BCE) */
	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes; the registers take the end address in
	 * 512-byte units
	 */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* clear any stale interrupt status and disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* wait (up to 1s) for the PHY reset to clear and the device to
	 * report ready
	 */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_) {
		buf &= ~MAC_CR_GMII_EN_;
		/* Enable Auto Duplex and Auto speed */
		buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	}

	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
	    dev->chipid == ID_REV_CHIP_ID_7850_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3167 
/* Initialise the rollover thresholds used by the statistics code and
 * request a first statistics update.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	/* default every counter to the 20-bit maximum ... */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	/* ... then override the counters that are 32 bits wide */
	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	set_bit(EVENT_STAT_UPDATE, &dev->flags);
}
3193 
/* ndo_open: start the PHY, the interrupt URB, the Tx/Rx data paths and
 * NAPI. On success the autopm reference taken here is kept until
 * lan78xx_stop(); on failure it is released before returning.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	/* start from clean FIFOs */
	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	napi_enable(&dev->napi);

	/* have the kevent worker bring the link up */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	/* keep the autopm reference while the interface is up */
	if (ret < 0)
		usb_autopm_put_interface(dev->intf);

	return ret;
}
3254 
/* Unlink all in-flight Tx/Rx URBs and wait until both queues drain, then
 * drop everything still sitting in the done/overflow/pending queues.
 * Completion handlers wake us through dev->wait.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3290 
/* ndo_stop: quiesce NAPI, URBs, data paths, PHY and deferred work, and
 * release the autopm reference taken in lan78xx_open(). Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3339 
/* Move @skb from @list to the rxq_done queue (recording @state in its
 * skb_data) and schedule NAPI if the done queue was previously empty.
 * Note the deliberately asymmetric locking: interrupts stay disabled
 * from the irqsave on list->lock through the irqrestore on
 * rxq_done.lock. Returns the skb's previous state.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	/* only the first insertion needs to kick NAPI */
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3363 
/* tx_complete - URB completion handler for bulk-out (Tx) transfers.
 *
 * Runs in USB completion context. Updates Tx statistics for the batch of
 * packets described by the skb_data control block, handles error statuses,
 * returns the Tx URB buffer to the free list and re-schedules NAPI when
 * pending data is waiting with no URB in flight.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* one URB carries several aggregated packets; the counts
		 * were recorded by lan78xx_tx_buf_fill()
		 */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors += entry->num_of_packet;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: clear the halt from process
			 * context via the deferred work handler
			 */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* transient bus problems: pause the stack queue */
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	/* release the PM reference taken in lan78xx_tx_bh() at submit time */
	usb_autopm_put_interface_async(dev->intf);

	skb_unlink(skb, &dev->txq);

	lan78xx_release_tx_buf(dev, skb);

	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
	 */
	if (skb_queue_empty(&dev->txq) &&
	    !skb_queue_empty(&dev->txq_pend))
		napi_schedule(&dev->napi);
}
3417 
3418 static void lan78xx_queue_skb(struct sk_buff_head *list,
3419 			      struct sk_buff *newsk, enum skb_state state)
3420 {
3421 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3422 
3423 	__skb_queue_tail(list, newsk);
3424 	entry->state = state;
3425 }
3426 
3427 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3428 {
3429 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3430 }
3431 
/* Bytes currently queued on txq_pend; unlocked snapshot of the counter
 * maintained by the tx_pend_skb_add/head_add/get helpers.
 */
static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
{
	return dev->tx_pend_data_len;
}
3436 
3437 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3438 				    struct sk_buff *skb,
3439 				    unsigned int *tx_pend_data_len)
3440 {
3441 	unsigned long flags;
3442 
3443 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3444 
3445 	__skb_queue_tail(&dev->txq_pend, skb);
3446 
3447 	dev->tx_pend_data_len += skb->len;
3448 	*tx_pend_data_len = dev->tx_pend_data_len;
3449 
3450 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3451 }
3452 
3453 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3454 					 struct sk_buff *skb,
3455 					 unsigned int *tx_pend_data_len)
3456 {
3457 	unsigned long flags;
3458 
3459 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3460 
3461 	__skb_queue_head(&dev->txq_pend, skb);
3462 
3463 	dev->tx_pend_data_len += skb->len;
3464 	*tx_pend_data_len = dev->tx_pend_data_len;
3465 
3466 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3467 }
3468 
3469 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3470 				    struct sk_buff **skb,
3471 				    unsigned int *tx_pend_data_len)
3472 {
3473 	unsigned long flags;
3474 
3475 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3476 
3477 	*skb = __skb_dequeue(&dev->txq_pend);
3478 	if (*skb)
3479 		dev->tx_pend_data_len -= (*skb)->len;
3480 	*tx_pend_data_len = dev->tx_pend_data_len;
3481 
3482 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3483 }
3484 
/* lan78xx_start_xmit - ndo_start_xmit handler.
 *
 * Queues @skb on the Tx pending list and lets NAPI (lan78xx_tx_bh())
 * aggregate pending SKBs into URB buffers. Never drops the frame here,
 * so always returns NETDEV_TX_OK.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* if the device is autosuspended, kick the deferred work so it
	 * can be resumed before the data is sent
	 */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	/* returns the pending-byte total including this skb */
	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3520 
3521 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3522 {
3523 	struct lan78xx_priv *pdata = NULL;
3524 	int ret;
3525 	int i;
3526 
3527 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3528 
3529 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3530 	if (!pdata) {
3531 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3532 		return -ENOMEM;
3533 	}
3534 
3535 	pdata->dev = dev;
3536 
3537 	spin_lock_init(&pdata->rfe_ctl_lock);
3538 	mutex_init(&pdata->dataport_mutex);
3539 
3540 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3541 
3542 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3543 		pdata->vlan_table[i] = 0;
3544 
3545 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3546 
3547 	dev->net->features = 0;
3548 
3549 	if (DEFAULT_TX_CSUM_ENABLE)
3550 		dev->net->features |= NETIF_F_HW_CSUM;
3551 
3552 	if (DEFAULT_RX_CSUM_ENABLE)
3553 		dev->net->features |= NETIF_F_RXCSUM;
3554 
3555 	if (DEFAULT_TSO_CSUM_ENABLE)
3556 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3557 
3558 	if (DEFAULT_VLAN_RX_OFFLOAD)
3559 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3560 
3561 	if (DEFAULT_VLAN_FILTER_ENABLE)
3562 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3563 
3564 	dev->net->hw_features = dev->net->features;
3565 
3566 	ret = lan78xx_setup_irq_domain(dev);
3567 	if (ret < 0) {
3568 		netdev_warn(dev->net,
3569 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3570 		goto out1;
3571 	}
3572 
3573 	/* Init all registers */
3574 	ret = lan78xx_reset(dev);
3575 	if (ret) {
3576 		netdev_warn(dev->net, "Registers INIT FAILED....");
3577 		goto out2;
3578 	}
3579 
3580 	ret = lan78xx_mdio_init(dev);
3581 	if (ret) {
3582 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3583 		goto out2;
3584 	}
3585 
3586 	dev->net->flags |= IFF_MULTICAST;
3587 
3588 	pdata->wol = WAKE_MAGIC;
3589 
3590 	return ret;
3591 
3592 out2:
3593 	lan78xx_remove_irq_domain(dev);
3594 
3595 out1:
3596 	netdev_warn(dev->net, "Bind routine FAILED");
3597 	cancel_work_sync(&pdata->set_multicast);
3598 	cancel_work_sync(&pdata->set_vlan);
3599 	kfree(pdata);
3600 	return ret;
3601 }
3602 
3603 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3604 {
3605 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3606 
3607 	lan78xx_remove_irq_domain(dev);
3608 
3609 	lan78xx_remove_mdio(dev);
3610 
3611 	if (pdata) {
3612 		cancel_work_sync(&pdata->set_multicast);
3613 		cancel_work_sync(&pdata->set_vlan);
3614 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3615 		kfree(pdata);
3616 		pdata = NULL;
3617 		dev->data[0] = 0;
3618 	}
3619 }
3620 
/* lan78xx_rx_csum_offload - apply hardware Rx checksum results to @skb.
 *
 * Falls back to software checksumming (CHECKSUM_NONE) when Rx checksum
 * offload is disabled, when the hardware flagged a checksum error
 * (RX_CMD_A_ICSM_), or when a VLAN tag is present but not being stripped
 * (see comment below). Otherwise the 16-bit checksum from RX command
 * word B is handed to the stack as CHECKSUM_COMPLETE.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3638 
3639 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3640 				    struct sk_buff *skb,
3641 				    u32 rx_cmd_a, u32 rx_cmd_b)
3642 {
3643 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3644 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3645 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3646 				       (rx_cmd_b & 0xffff));
3647 }
3648 
/* lan78xx_skb_return - account for a received frame and pass it to the stack.
 *
 * Updates Rx counters, resolves the protocol (eth_type_trans() also pulls
 * the Ethernet header, hence the "+ sizeof(struct ethhdr)" in the debug
 * length), clears the driver's control-block state, and delivers via GRO
 * unless the frame was deferred for RX timestamping.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* timestamping subsystem takes ownership of the skb if it defers */
	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}
3665 
/* lan78xx_rx - unpack an Rx URB buffer into individual frames.
 *
 * The buffer is a sequence of frames, each prefixed by a 4-byte RX command
 * word A, a 4-byte word B and a 2-byte word C (all little-endian), followed
 * by the frame data including the FCS, padded so the next header is 4-byte
 * aligned.
 *
 * Each valid frame is copied into a fresh NAPI SKB and delivered, or parked
 * on rxq_overflow when the budget is exhausted. Returns 1 on success, 0 when
 * the buffer is too short or malformed (remaining contents are abandoned).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* length claimed by the header must fit in what's left */
		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error: skip the frame
			 * but keep parsing the rest of the buffer
			 */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			/* strip the trailing FCS */
			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3751 
3752 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3753 			      int budget, int *work_done)
3754 {
3755 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3756 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3757 		dev->net->stats.rx_errors++;
3758 	}
3759 }
3760 
/* rx_complete - URB completion handler for bulk-in (Rx) transfers.
 *
 * Runs in USB completion context. Classifies the URB status into either
 * rx_done (frame data to parse in NAPI context) or rx_cleanup (buffer is
 * recycled without parsing), updates error counters, and hands the buffer
 * from dev->rxq to dev->rxq_done via defer_bh().
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	skb_put(skb, urb->actual_length);
	state = rx_done;

	/* sanity check: the URB should always be the one stored in the
	 * SKB's control block
	 */
	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		/* success, but reject runt buffers */
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* stalled endpoint: defer clearing the halt to the workqueue */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}
3820 
/* rx_submit - submit one Rx buffer as a bulk-in URB.
 *
 * The submission is done under rxq.lock so queueing the SKB onto dev->rxq
 * is atomic with the usb_submit_urb() call. The device must be present,
 * running, not halted and not asleep; otherwise -ENOLINK is returned.
 * On any failure the buffer is returned to the Rx free list.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear the halt from the workqueue */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* any failure: recycle the buffer onto the free list */
	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
3872 
3873 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3874 {
3875 	struct sk_buff *rx_buf;
3876 
3877 	/* Ensure the maximum number of Rx URBs is submitted
3878 	 */
3879 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3880 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3881 			break;
3882 	}
3883 }
3884 
/* lan78xx_rx_urb_resubmit - recycle a processed Rx buffer.
 *
 * Rewinds the SKB's data/tail/length bookkeeping back to an empty buffer
 * (the parsing in lan78xx_rx() advanced skb->data) and submits it again.
 * rx_submit() handles any failure by releasing the buffer.
 */
static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
				    struct sk_buff *rx_buf)
{
	/* reset SKB data pointers */

	rx_buf->data = rx_buf->head;
	skb_reset_tail_pointer(rx_buf);
	rx_buf->len = 0;
	rx_buf->data_len = 0;

	rx_submit(dev, rx_buf, GFP_ATOMIC);
}
3897 
/* lan78xx_fill_tx_cmd_words - write the two little-endian TX command words
 * that prefix each frame in a Tx URB buffer.
 *
 * Word A carries the frame length, FCS insertion, checksum-offload and
 * LSO/VLAN-insert flags; word B carries the MSS (for GSO frames, clamped
 * to TX_CMD_B_MSS_MIN_) and the VLAN tag.
 */
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
{
	u32 tx_cmd_a;
	u32 tx_cmd_b;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	/* request hardware IP + TCP/UDP checksum insertion */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	put_unaligned_le32(tx_cmd_a, buffer);
	put_unaligned_le32(tx_cmd_b, buffer + 4);
}
3925 
/* lan78xx_tx_buf_fill - aggregate pending Tx SKBs into one URB buffer.
 *
 * Dequeues SKBs from txq_pend and packs each one into @tx_buf as
 * [alignment padding][8-byte TX command words][frame data], stopping when
 * the buffer cannot hold the next SKB (which is pushed back to the head
 * of the pending queue) or the queue is empty.
 *
 * Return: @tx_buf's skb_data control block with num_of_packet (counting
 * GSO segments) and total payload length filled in for tx_complete().
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* pad so each frame header starts on a TX_ALIGNMENT boundary */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* doesn't fit: return it for the next URB */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			/* drop this SKB but reuse its command-word slot */
			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
3991 
/* lan78xx_tx_bh - Tx bottom half, run from NAPI poll.
 *
 * Wakes the stack queue when pending data has drained below the free URB
 * capacity, then repeatedly fills free Tx URB buffers from txq_pend and
 * submits them, holding a PM reference per submitted URB (released in
 * tx_complete() or on error). Stops on empty queues, submit error, or
 * when the device is asleep (URB parked on the deferred anchor).
 *
 * Note: the "goto out" from the autopm failure path jumps INTO the
 * if (ret) error block at the bottom of the loop to share its cleanup.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear halt from the workqueue */
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
4084 
/* lan78xx_bh - main NAPI bottom half: drain completed Rx URBs and kick Tx.
 *
 * Processing order: (1) deliver frames left on rxq_overflow from the
 * previous cycle, (2) snapshot rxq_done and process each completed URB up
 * to @budget, resubmitting buffers as they are consumed, (3) push any
 * unprocessed completions back to the FRONT of rxq_done, (4) top up the
 * Rx URBs and run the Tx bottom half.
 *
 * Return: number of frames delivered (may exceed @budget by the overflow
 * drain, which the caller's budget comparison accounts for).
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* errored URB: recycle without parsing */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4160 
/* lan78xx_poll - NAPI poll callback.
 *
 * Skips all work while the device is suspended. Otherwise runs the bottom
 * half and, when under budget, completes NAPI — but immediately re-arms the
 * poll if completions arrived meanwhile, or (with carrier up) if Tx data is
 * pending with no URB in flight, or after waking a stopped stack queue.
 *
 * Return: work done (or @budget if the budget was fully consumed, keeping
 * NAPI scheduled).
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	return result;
}
4204 
4205 static void lan78xx_delayedwork(struct work_struct *work)
4206 {
4207 	int status;
4208 	struct lan78xx_net *dev;
4209 
4210 	dev = container_of(work, struct lan78xx_net, wq.work);
4211 
4212 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4213 		return;
4214 
4215 	if (usb_autopm_get_interface(dev->intf) < 0)
4216 		return;
4217 
4218 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4219 		unlink_urbs(dev, &dev->txq);
4220 
4221 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4222 		if (status < 0 &&
4223 		    status != -EPIPE &&
4224 		    status != -ESHUTDOWN) {
4225 			if (netif_msg_tx_err(dev))
4226 				netdev_err(dev->net,
4227 					   "can't clear tx halt, status %d\n",
4228 					   status);
4229 		} else {
4230 			clear_bit(EVENT_TX_HALT, &dev->flags);
4231 			if (status != -ESHUTDOWN)
4232 				netif_wake_queue(dev->net);
4233 		}
4234 	}
4235 
4236 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4237 		unlink_urbs(dev, &dev->rxq);
4238 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4239 		if (status < 0 &&
4240 		    status != -EPIPE &&
4241 		    status != -ESHUTDOWN) {
4242 			if (netif_msg_rx_err(dev))
4243 				netdev_err(dev->net,
4244 					   "can't clear rx halt, status %d\n",
4245 					   status);
4246 		} else {
4247 			clear_bit(EVENT_RX_HALT, &dev->flags);
4248 			napi_schedule(&dev->napi);
4249 		}
4250 	}
4251 
4252 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4253 		int ret = 0;
4254 
4255 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4256 		if (lan78xx_link_reset(dev) < 0) {
4257 			netdev_info(dev->net, "link reset failed (%d)\n",
4258 				    ret);
4259 		}
4260 	}
4261 
4262 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4263 		lan78xx_update_stats(dev);
4264 
4265 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4266 
4267 		mod_timer(&dev->stat_monitor,
4268 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4269 
4270 		dev->delta = min((dev->delta * 2), 50);
4271 	}
4272 
4273 	usb_autopm_put_interface(dev->intf);
4274 }
4275 
/* intr_complete - completion handler for the interrupt (status) endpoint URB.
 *
 * On success passes the status data to lan78xx_status(); on shutdown-type
 * errors it returns without resubmitting. Otherwise the URB is zeroed and
 * resubmitted so the endpoint keeps polling.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	/* don't resubmit if the interface is going away */
	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4327 
/* lan78xx_disconnect - USB disconnect callback: full device teardown.
 *
 * Unregisters the netdev, stops the stats timer and deferred work (after
 * setting EVENT_DEV_DISCONNECT so the worker bails out), disconnects and
 * frees the PHY (including a fixed-link pseudo PHY), discards deferred
 * URBs, unbinds the private data and releases all Tx/Rx/interrupt
 * resources before freeing the netdev and dropping the usb_device ref.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	netif_napi_del(&dev->napi);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	timer_shutdown_sync(&dev->stat_monitor);
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	/* keep a reference: net->phydev is cleared by phy_disconnect() */
	phydev = net->phydev;

	phy_disconnect(net->phydev);

	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4373 
/* lan78xx_tx_timeout - ndo_tx_timeout handler: cancel in-flight Tx URBs
 * and schedule NAPI so the Tx path is restarted from the pending queue.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}
4381 
4382 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4383 						struct net_device *netdev,
4384 						netdev_features_t features)
4385 {
4386 	struct lan78xx_net *dev = netdev_priv(netdev);
4387 
4388 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4389 		features &= ~NETIF_F_GSO_MASK;
4390 
4391 	features = vlan_features_check(skb, features);
4392 	features = vxlan_features_check(skb, features);
4393 
4394 	return features;
4395 }
4396 
/* net_device_ops for lan78xx interfaces; installed in lan78xx_probe(). */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4412 
/* lan78xx_stat_monitor - stats timer callback: defer the actual register
 * reads to process context via the EVENT_STAT_UPDATE work item.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4419 
/* lan78xx_probe - bind driver to a newly enumerated LAN78xx device
 * @intf: the USB interface being probed
 * @id: matched entry from the device ID table (unused here)
 *
 * Allocates the netdev and driver state, validates the expected endpoint
 * layout (bulk-in, bulk-out, interrupt-in), sets up TX/RX resources, the
 * interrupt URB and the PHY, then registers the net device.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound via the goto ladder at the bottom.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	/* take a reference on the usb_device; dropped at out1 on error */
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->phy_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* the driver requires at least bulk-in, bulk-out and intr-in
	 * endpoints; reject interfaces that cannot provide all three
	 */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	/* validate the bulk-in endpoint */
	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	/* validate the bulk-out endpoint */
	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	/* validate the interrupt-in endpoint */
	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out5;
	}

	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_urbs;
	}

	usb_fill_int_urb(dev->urb_intr, dev->udev,
			 dev->pipe_intr, buf, maxp,
			 intr_complete, dev, period);
	/* URB now owns buf: usb_free_urb() will kfree it for us */
	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto free_urbs;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto free_urbs;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out8;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): ret is assigned but deliberately not checked here;
	 * failure to enable wakeup is treated as non-fatal.
	 */
	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

	/* unwind in reverse order of acquisition */
out8:
	phy_disconnect(netdev->phydev);
free_urbs:
	usb_free_urb(dev->urb_intr);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4594 
4595 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4596 {
4597 	const u16 crc16poly = 0x8005;
4598 	int i;
4599 	u16 bit, crc, msb;
4600 	u8 data;
4601 
4602 	crc = 0xFFFF;
4603 	for (i = 0; i < len; i++) {
4604 		data = *buf++;
4605 		for (bit = 0; bit < 8; bit++) {
4606 			msb = crc >> 15;
4607 			crc <<= 1;
4608 
4609 			if (msb ^ (u16)(data & 1)) {
4610 				crc ^= crc16poly;
4611 				crc |= (u16)0x0001U;
4612 			}
4613 			data >>= 1;
4614 		}
4615 	}
4616 
4617 	return crc;
4618 }
4619 
/* Configure the device for USB autosuspend (selective suspend).
 *
 * Stops both datapaths, clears previous wake status, arms good-frame
 * (RFE) wakeup with frame storing, selects suspend mode 3 in PMT_CTL,
 * clears any pending wake-up status bits, and finally restarts the RX
 * path so the MAC can still see wakeup frames while suspended.
 *
 * Returns 0 on success or a negative errno from the first failing
 * register access.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	/* clear wakeup control/status so stale events cannot fire */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* enable PHY + WOL wakeup sources and select suspend mode 3 */
	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* write-1-to-clear any pending wake-up status */
	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* keep RX running so wakeup frames are received while suspended */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4687 
/* Program Wake-on-LAN filters for a full (system) suspend.
 * @wol: bitmask of WAKE_* flags (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP)
 *
 * Stops both datapaths, clears previous wake state and all wakeup-frame
 * filters, then builds WUCSR/PMT_CTL values and WUF_CFG/WUF_MASK filter
 * entries according to the requested wake sources. RX is restarted at
 * the end so the MAC can observe wake packets while suspended.
 *
 * Returns 0 on success or a negative errno from the first failing
 * register access.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* match prefixes: IPv4 mcast 01:00:5E.., IPv6 mcast 33:33.. */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* clear wakeup control/status registers and wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters before reprogramming */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x7 = match first 3 bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3 = match first 2 bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3000 = match the EtherType bytes (offsets 12,13) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* keep RX running so wake packets are received while suspended */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4884 
/* USB suspend handler (both autosuspend and system suspend).
 *
 * With the interface open: refuses autosuspend while TX work is queued,
 * quiesces both datapaths, kills all in-flight URBs, then arms either
 * autosuspend (good-frame wakeup) or WOL per the user's configuration.
 * With the interface closed: disables all wakeup sources and enters
 * suspend mode 3.
 *
 * Returns 0 on success, -EBUSY to veto a premature autosuspend, or a
 * negative errno on register access failure.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		/* txq.lock guards both the queue check and the ASLEEP flag */
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			/* system suspend: arm the user-configured WOL */
			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* enter suspend mode 3 with no wakeup sources enabled */
		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* write-1-to-clear any pending wake-up status */
		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4994 
4995 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4996 {
4997 	bool pipe_halted = false;
4998 	struct urb *urb;
4999 
5000 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5001 		struct sk_buff *skb = urb->context;
5002 		int ret;
5003 
5004 		if (!netif_device_present(dev->net) ||
5005 		    !netif_carrier_ok(dev->net) ||
5006 		    pipe_halted) {
5007 			lan78xx_release_tx_buf(dev, skb);
5008 			continue;
5009 		}
5010 
5011 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5012 
5013 		if (ret == 0) {
5014 			netif_trans_update(dev->net);
5015 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5016 		} else {
5017 			if (ret == -EPIPE) {
5018 				netif_stop_queue(dev->net);
5019 				pipe_halted = true;
5020 			} else if (ret == -ENODEV) {
5021 				netif_device_detach(dev->net);
5022 			}
5023 
5024 			lan78xx_release_tx_buf(dev, skb);
5025 		}
5026 	}
5027 
5028 	return pipe_halted;
5029 }
5030 
/* USB resume handler: undo lan78xx_suspend().
 *
 * If the interface is open, flushes the TX FIFO, resubmits the interrupt
 * URB and any deferred TX URBs, restarts the TX path, kicks NAPI and the
 * statistics timer. In all cases, clears the wakeup enables and latched
 * wake status so stale events cannot re-trigger a wakeup.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		/* clear ASLEEP under txq.lock, pairing with suspend */
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		/* restart the queue only if there is room for more TX data */
		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		napi_schedule(&dev->napi);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	/* disable wakeup sources armed during suspend */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	/* write-1-to-clear the latched wake-event status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5127 
5128 static int lan78xx_reset_resume(struct usb_interface *intf)
5129 {
5130 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5131 	int ret;
5132 
5133 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5134 
5135 	ret = lan78xx_reset(dev);
5136 	if (ret < 0)
5137 		return ret;
5138 
5139 	phy_start(dev->net->phydev);
5140 
5141 	ret = lan78xx_resume(intf);
5142 
5143 	return ret;
5144 }
5145 
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
5166 
/* USB driver glue: supports runtime autosuspend and opts out of
 * hub-initiated LPM.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
5178 
/* Generates module init/exit that register/deregister the USB driver */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
5184