// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

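/* ethtool statistics strings; the entry order must match the field order of
 * struct lan78xx_statstage below, as lan78xx_get_stats() copies the counters
 * out positionally.
 */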
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

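/* 64-bit shadow of the 32-bit hardware counters; lan78xx_update_stats()
 * folds counter rollovers into these accumulated values.
 */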
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

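/* 32 standard PHY registers, each stored as a u32 in the ethtool register
 * dump produced by lan78xx_get_regs()
 */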
#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

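/* bit numbers in lan78xx_net.flags, set via lan78xx_defer_kevent() to
 * request deferred work
 */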
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};

/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

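/* Register access goes over the USB control pipe. The transfer buffer is
 * heap-allocated because the USB core requires DMA-able memory; on-stack
 * buffers must not be passed to usb_control_msg().
 */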
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

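	/* reconstruct 64-bit totals: each recorded rollover contributes one
	 * full counter span (max + 1) on top of the current 32-bit reading
	 */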
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with timeout. Called with phy_mutex held. */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

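/* compose a MII_ACC register value that kicks off a PHY register access */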
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds our own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

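	/* FCT_FLOW holds the RX FIFO pause-on/pause-off thresholds; the
	 * magic constants below are presumably vendor-tuned per USB speed
	 */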
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

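/* Deferred link-state handling: resynchronize USB LPM (U1/U2) settings,
 * flow control and the stat timer with the PHY link state. Scheduled via
 * EVENT_LINK_RESET from lan78xx_status().
 */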
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

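/* parse the 4-byte, little-endian status word delivered on the interrupt
 * endpoint and kick off deferred link handling on a PHY interrupt
 */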
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* release the autopm reference on the error path too */
	if (wol->wolopts & ~WAKE_ALL) {
		ret = -EINVAL;
		goto out;
	}

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

out:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	if (!netdev->phydev)
		return (sizeof(lan78xx_regs));
	else
		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

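/* MAC address selection order: RX_ADDR registers (if already programmed),
 * device tree / platform data, EEPROM or OTP, and finally a random address
 * as the last resort
 */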
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int temp;

	/* In forced 100 Mbps full/half-duplex mode, the chip may fail to
	 * set the mode correctly when the cable is switched between a
	 * long (~50+ m) and a short one. As a workaround, set the speed
	 * to 10 first, then back to 100.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear the pending interrupt generated during the workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}

static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};

static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* Do the register access here: irq_bus_lock and irq_bus_sync_unlock
	 * are the only two callbacks executed in a non-atomic context.
	 */
1919 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1920 	if (buf != data->irqenable)
1921 		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1922 
1923 	mutex_unlock(&data->irq_lock);
1924 }
1925 
1926 static struct irq_chip lan78xx_irqchip = {
1927 	.name			= "lan78xx-irqs",
1928 	.irq_mask		= lan78xx_irq_mask,
1929 	.irq_unmask		= lan78xx_irq_unmask,
1930 	.irq_bus_lock		= lan78xx_irq_bus_lock,
1931 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
1932 };
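
/* This irqchip lives behind USB register access, so it follows the genirq
 * "slow bus" pattern. A rough, illustrative call sequence when the core
 * masks or unmasks the mapped interrupt looks like:
 *
 *	lan78xx_irq_bus_lock(d);          mutex_lock(&irq_lock)
 *	lan78xx_irq_unmask(d);            update cached data->irqenable
 *	lan78xx_irq_bus_sync_unlock(d);   one USB write to INT_EP_CTL
 *
 * so the sleeping register I/O happens only in the sync_unlock step.
 */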
1933 
1934 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1935 {
1936 	struct device_node *of_node;
1937 	struct irq_domain *irqdomain;
1938 	unsigned int irqmap = 0;
1939 	u32 buf;
1940 	int ret = 0;
1941 
1942 	of_node = dev->udev->dev.parent->of_node;
1943 
1944 	mutex_init(&dev->domain_data.irq_lock);
1945 
1946 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1947 	dev->domain_data.irqenable = buf;
1948 
1949 	dev->domain_data.irqchip = &lan78xx_irqchip;
1950 	dev->domain_data.irq_handler = handle_simple_irq;
1951 
1952 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1953 					  &chip_domain_ops, &dev->domain_data);
1954 	if (irqdomain) {
1955 		/* create mapping for PHY interrupt */
1956 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1957 		if (!irqmap) {
1958 			irq_domain_remove(irqdomain);
1959 
1960 			irqdomain = NULL;
1961 			ret = -EINVAL;
1962 		}
1963 	} else {
1964 		ret = -EINVAL;
1965 	}
1966 
1967 	dev->domain_data.irqdomain = irqdomain;
1968 	dev->domain_data.phyirq = irqmap;
1969 
1970 	return ret;
1971 }
1972 
1973 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1974 {
1975 	if (dev->domain_data.phyirq > 0) {
1976 		irq_dispose_mapping(dev->domain_data.phyirq);
1977 
1978 		if (dev->domain_data.irqdomain)
1979 			irq_domain_remove(dev->domain_data.irqdomain);
1980 	}
1981 	dev->domain_data.phyirq = 0;
1982 	dev->domain_data.irqdomain = NULL;
1983 }
1984 
1985 static int lan8835_fixup(struct phy_device *phydev)
1986 {
1987 	int buf;
1988 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1989 
1990 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1991 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
1992 	buf &= ~0x1800;
1993 	buf |= 0x0800;
1994 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1995 
1996 	/* RGMII MAC TXC Delay Enable */
1997 	lan78xx_write_reg(dev, MAC_RGMII_ID,
1998 				MAC_RGMII_ID_TXC_DELAY_EN_);
1999 
2000 	/* RGMII TX DLL Tune Adjust */
2001 	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2002 
2003 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2004 
2005 	return 1;
2006 }
2007 
2008 static int ksz9031rnx_fixup(struct phy_device *phydev)
2009 {
2010 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2011 
2012 	/* Micrel KSZ9031RNX PHY configuration */
2013 	/* RGMII Control Signal Pad Skew */
2014 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2015 	/* RGMII RX Data Pad Skew */
2016 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2017 	/* RGMII RX Clock Pad Skew */
2018 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2019 
2020 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2021 
2022 	return 1;
2023 }
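
/* Both fixups return 1 rather than 0; this is still treated as success,
 * since phy_scan_fixups() only aborts on a negative return value.
 */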
2024 
2025 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2026 {
2027 	u32 buf;
2028 	int ret;
2029 	struct fixed_phy_status fphy_status = {
2030 		.link = 1,
2031 		.speed = SPEED_1000,
2032 		.duplex = DUPLEX_FULL,
2033 	};
2034 	struct phy_device *phydev;
2035 
2036 	phydev = phy_find_first(dev->mdiobus);
2037 	if (!phydev) {
2038 		netdev_dbg(dev->net, "no PHY found, registering fixed PHY\n");
2039 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2040 		if (IS_ERR(phydev)) {
2041 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2042 			return NULL;
2043 		}
2044 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2045 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2046 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2047 					MAC_RGMII_ID_TXC_DELAY_EN_);
2048 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2049 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2050 		buf |= HW_CFG_CLK125_EN_;
2051 		buf |= HW_CFG_REFCLK25_EN_;
2052 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2053 	} else {
2054 		if (!phydev->drv) {
2055 			netdev_err(dev->net, "no PHY driver found\n");
2056 			return NULL;
2057 		}
2058 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2059 		/* external PHY fixup for KSZ9031RNX */
2060 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2061 						 ksz9031rnx_fixup);
2062 		if (ret < 0) {
2063 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2064 			return NULL;
2065 		}
2066 		/* external PHY fixup for LAN8835 */
2067 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2068 						 lan8835_fixup);
2069 		if (ret < 0) {
2070 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2071 			return NULL;
2072 		}
2073 		/* add more external PHY fixups here if needed */
2074 
2075 		phydev->is_internal = false;
2076 	}
2077 	return phydev;
2078 }
2079 
2080 static int lan78xx_phy_init(struct lan78xx_net *dev)
2081 {
2082 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2083 	int ret;
2084 	u32 mii_adv;
2085 	struct phy_device *phydev;
2086 
2087 	switch (dev->chipid) {
2088 	case ID_REV_CHIP_ID_7801_:
2089 		phydev = lan7801_phy_init(dev);
2090 		if (!phydev) {
2091 			netdev_err(dev->net, "lan7801: PHY init failed\n");
2092 			return -EIO;
2093 		}
2094 		break;
2095 
2096 	case ID_REV_CHIP_ID_7800_:
2097 	case ID_REV_CHIP_ID_7850_:
2098 		phydev = phy_find_first(dev->mdiobus);
2099 		if (!phydev) {
2100 			netdev_err(dev->net, "no PHY found\n");
2101 			return -EIO;
2102 		}
2103 		phydev->is_internal = true;
2104 		dev->interface = PHY_INTERFACE_MODE_GMII;
2105 		break;
2106 
2107 	default:
2108 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2109 		return -EIO;
2110 	}
2111 
2112 	/* if phyirq is not set, use polling mode in phylib */
2113 	if (dev->domain_data.phyirq > 0)
2114 		phydev->irq = dev->domain_data.phyirq;
2115 	else
2116 		phydev->irq = 0;
2117 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2118 
2119 	/* set to AUTOMDIX */
2120 	phydev->mdix = ETH_TP_MDI_AUTO;
2121 
2122 	ret = phy_connect_direct(dev->net, phydev,
2123 				 lan78xx_link_status_change,
2124 				 dev->interface);
2125 	if (ret) {
2126 		netdev_err(dev->net, "can't attach PHY to %s\n",
2127 			   dev->mdiobus->id);
2128 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2129 			if (phy_is_pseudo_fixed_link(phydev)) {
2130 				fixed_phy_unregister(phydev);
2131 			} else {
2132 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2133 							     0xfffffff0);
2134 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2135 							     0xfffffff0);
2136 			}
2137 		}
2138 		return -EIO;
2139 	}
2140 
2141 	/* MAC doesn't support 1000T Half */
2142 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2143 
2144 	/* support both flow controls */
2145 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2146 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2147 			   phydev->advertising);
2148 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2149 			   phydev->advertising);
2150 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2151 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2152 	linkmode_or(phydev->advertising, fc, phydev->advertising);
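	/* With both FLOW_CTRL_RX and FLOW_CTRL_TX requested,
	 * mii_advertise_flowctrl() yields ADVERTISE_PAUSE_CAP alone
	 * (symmetric pause); TX-only would yield ADVERTISE_PAUSE_ASYM
	 * and RX-only both bits, per the 802.3 pause advertisement
	 * encoding.
	 */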
2153 
2154 	if (phydev->mdio.dev.of_node) {
2155 		u32 reg;
2156 		int len;
2157 
2158 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2159 						      "microchip,led-modes",
2160 						      sizeof(u32));
2161 		if (len >= 0) {
2162 			/* Ensure the appropriate LEDs are enabled */
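			/* each (len > n) comparison evaluates to 0 or 1,
			 * so e.g. a two-entry "microchip,led-modes"
			 * property (len == 2) enables LED0 and LED1 only.
			 */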
2163 			lan78xx_read_reg(dev, HW_CFG, &reg);
2164 			reg &= ~(HW_CFG_LED0_EN_ |
2165 				 HW_CFG_LED1_EN_ |
2166 				 HW_CFG_LED2_EN_ |
2167 				 HW_CFG_LED3_EN_);
2168 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2169 				(len > 1) * HW_CFG_LED1_EN_ |
2170 				(len > 2) * HW_CFG_LED2_EN_ |
2171 				(len > 3) * HW_CFG_LED3_EN_;
2172 			lan78xx_write_reg(dev, HW_CFG, reg);
2173 		}
2174 	}
2175 
2176 	genphy_config_aneg(phydev);
2177 
2178 	dev->fc_autoneg = phydev->autoneg;
2179 
2180 	return 0;
2181 }
2182 
2183 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2184 {
2185 	u32 buf;
2186 	bool rxenabled;
2187 
2188 	lan78xx_read_reg(dev, MAC_RX, &buf);
2189 
2190 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2191 
2192 	if (rxenabled) {
2193 		buf &= ~MAC_RX_RXEN_;
2194 		lan78xx_write_reg(dev, MAC_RX, buf);
2195 	}
2196 
2197 	/* add 4 to size for FCS */
2198 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2199 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2200 
2201 	lan78xx_write_reg(dev, MAC_RX, buf);
2202 
2203 	if (rxenabled) {
2204 		buf |= MAC_RX_RXEN_;
2205 		lan78xx_write_reg(dev, MAC_RX, buf);
2206 	}
2207 
2208 	return 0;
2209 }
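
/* With the default MTU of 1500, callers pass 1500 + VLAN_ETH_HLEN = 1518
 * and the MAC_RX max-size field ends up as 1518 + 4 (FCS) = 1522 bytes.
 */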
2210 
2211 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2212 {
2213 	struct sk_buff *skb;
2214 	unsigned long flags;
2215 	int count = 0;
2216 
2217 	spin_lock_irqsave(&q->lock, flags);
2218 	while (!skb_queue_empty(q)) {
2219 		struct skb_data	*entry;
2220 		struct urb *urb;
2221 		int ret;
2222 
2223 		skb_queue_walk(q, skb) {
2224 			entry = (struct skb_data *)skb->cb;
2225 			if (entry->state != unlink_start)
2226 				goto found;
2227 		}
2228 		break;
2229 found:
2230 		entry->state = unlink_start;
2231 		urb = entry->urb;
2232 
2233 		/* Take a reference on the URB so that it cannot be freed
2234 		 * while usb_unlink_urb() is in progress; usb_unlink_urb()
2235 		 * always races with the .complete handler (including
2236 		 * defer_bh), and without the reference that race could
2237 		 * trigger a use-after-free inside usb_unlink_urb().
2238 		 */
2239 		usb_get_urb(urb);
2240 		spin_unlock_irqrestore(&q->lock, flags);
2241 		/* during some PM-driven resume scenarios,
2242 		 * these (async) unlinks complete immediately
2243 		 */
2244 		ret = usb_unlink_urb(urb);
2245 		if (ret != -EINPROGRESS && ret != 0)
2246 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2247 		else
2248 			count++;
2249 		usb_put_urb(urb);
2250 		spin_lock_irqsave(&q->lock, flags);
2251 	}
2252 	spin_unlock_irqrestore(&q->lock, flags);
2253 	return count;
2254 }
2255 
2256 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2257 {
2258 	struct lan78xx_net *dev = netdev_priv(netdev);
2259 	int ll_mtu = new_mtu + netdev->hard_header_len;
2260 	int old_hard_mtu = dev->hard_mtu;
2261 	int old_rx_urb_size = dev->rx_urb_size;
2262 
2263 	/* no second zero-length packet read wanted after mtu-sized packets */
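	/* e.g. at high speed (512-byte bulk packets) an MTU of 490 gives
	 * ll_mtu = 490 + 22 = 512 (the 22 assumes the 8 bytes of TX
	 * command overhead that lan78xx_bind() adds to hard_header_len),
	 * an exact multiple of maxpacket, so it is rejected with -EDOM.
	 */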
2264 	if ((ll_mtu % dev->maxpacket) == 0)
2265 		return -EDOM;
2266 
2267 	lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2268 
2269 	netdev->mtu = new_mtu;
2270 
2271 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2272 	if (dev->rx_urb_size == old_hard_mtu) {
2273 		dev->rx_urb_size = dev->hard_mtu;
2274 		if (dev->rx_urb_size > old_rx_urb_size) {
2275 			if (netif_running(dev->net)) {
2276 				unlink_urbs(dev, &dev->rxq);
2277 				tasklet_schedule(&dev->bh);
2278 			}
2279 		}
2280 	}
2281 
2282 	return 0;
2283 }
2284 
2285 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2286 {
2287 	struct lan78xx_net *dev = netdev_priv(netdev);
2288 	struct sockaddr *addr = p;
2289 	u32 addr_lo, addr_hi;
2290 
2291 	if (netif_running(netdev))
2292 		return -EBUSY;
2293 
2294 	if (!is_valid_ether_addr(addr->sa_data))
2295 		return -EADDRNOTAVAIL;
2296 
2297 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2298 
2299 	addr_lo = netdev->dev_addr[0] |
2300 		  netdev->dev_addr[1] << 8 |
2301 		  netdev->dev_addr[2] << 16 |
2302 		  netdev->dev_addr[3] << 24;
2303 	addr_hi = netdev->dev_addr[4] |
2304 		  netdev->dev_addr[5] << 8;
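
	/* e.g. 00:11:22:33:44:55 packs to addr_lo = 0x33221100 and
	 * addr_hi = 0x00005544, matching the little-endian register
	 * layout of RX_ADDRL/RX_ADDRH below.
	 */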
2305 
2306 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2307 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2308 
2309 	/* Added to support MAC address changes */
2310 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2311 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2312 
2313 	return 0;
2314 }
2315 
2316 /* Enable or disable Rx checksum offload engine */
2317 static int lan78xx_set_features(struct net_device *netdev,
2318 				netdev_features_t features)
2319 {
2320 	struct lan78xx_net *dev = netdev_priv(netdev);
2321 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2322 	unsigned long flags;
2323 
2324 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2325 
2326 	if (features & NETIF_F_RXCSUM) {
2327 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2328 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2329 	} else {
2330 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2331 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2332 	}
2333 
2334 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2335 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2336 	else
2337 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2338 
2339 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2340 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2341 	else
2342 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2343 
2344 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2345 
2346 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2347 
2348 	return 0;
2349 }
2350 
2351 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2352 {
2353 	struct lan78xx_priv *pdata =
2354 			container_of(param, struct lan78xx_priv, set_vlan);
2355 	struct lan78xx_net *dev = pdata->dev;
2356 
2357 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2358 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2359 }
2360 
2361 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2362 				   __be16 proto, u16 vid)
2363 {
2364 	struct lan78xx_net *dev = netdev_priv(netdev);
2365 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2366 	u16 vid_bit_index;
2367 	u16 vid_dword_index;
2368 
2369 	vid_dword_index = (vid >> 5) & 0x7F;
2370 	vid_bit_index = vid & 0x1F;
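	/* e.g. VID 100: dword index 100 >> 5 = 3, bit index 100 & 0x1F = 4,
	 * i.e. bit 4 of vlan_table[3] in the 128-word (4096-VID) bitmap.
	 */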
2371 
2372 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2373 
2374 	/* defer register writes to a sleepable context */
2375 	schedule_work(&pdata->set_vlan);
2376 
2377 	return 0;
2378 }
2379 
2380 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2381 				    __be16 proto, u16 vid)
2382 {
2383 	struct lan78xx_net *dev = netdev_priv(netdev);
2384 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2385 	u16 vid_bit_index;
2386 	u16 vid_dword_index;
2387 
2388 	vid_dword_index = (vid >> 5) & 0x7F;
2389 	vid_bit_index = vid & 0x1F;
2390 
2391 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2392 
2393 	/* defer register writes to a sleepable context */
2394 	schedule_work(&pdata->set_vlan);
2395 
2396 	return 0;
2397 }
2398 
2399 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2400 {
2401 	int ret;
2402 	u32 buf;
2403 	u32 regs[6] = { 0 };
2404 
2405 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2406 	if (buf & USB_CFG1_LTM_ENABLE_) {
2407 		u8 temp[2];
2408 		/* Get values from EEPROM first */
2409 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2410 			if (temp[0] == 24) {
2411 				ret = lan78xx_read_raw_eeprom(dev,
2412 							      temp[1] * 2,
2413 							      24,
2414 							      (u8 *)regs);
2415 				if (ret < 0)
2416 					return;
2417 			}
2418 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2419 			if (temp[0] == 24) {
2420 				ret = lan78xx_read_raw_otp(dev,
2421 							   temp[1] * 2,
2422 							   24,
2423 							   (u8 *)regs);
2424 				if (ret < 0)
2425 					return;
2426 			}
2427 		}
2428 	}
2429 
2430 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2431 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2432 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2433 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2434 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2435 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2436 }
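
/* The six LTM (Latency Tolerance Messaging) registers default to zero
 * unless LTM is enabled and the EEPROM/OTP header at offset 0x3F declares
 * a 24-byte table (temp[0] == 24) located at byte offset temp[1] * 2.
 */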
2437 
2438 static int lan78xx_reset(struct lan78xx_net *dev)
2439 {
2440 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2441 	u32 buf;
2442 	int ret = 0;
2443 	unsigned long timeout;
2444 	u8 sig;
2445 
2446 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2447 	buf |= HW_CFG_LRST_;
2448 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2449 
2450 	timeout = jiffies + HZ;
2451 	do {
2452 		mdelay(1);
2453 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2454 		if (time_after(jiffies, timeout)) {
2455 			netdev_warn(dev->net,
2456 				    "timeout on completion of Lite Reset\n");
2457 			return -EIO;
2458 		}
2459 	} while (buf & HW_CFG_LRST_);
2460 
2461 	lan78xx_init_mac_address(dev);
2462 
2463 	/* save DEVID for later usage */
2464 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2465 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2466 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2467 
2468 	/* Respond to the IN token with a NAK */
2469 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2470 	buf |= USB_CFG_BIR_;
2471 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2472 
2473 	/* Init LTM */
2474 	lan78xx_init_ltm(dev);
2475 
2476 	if (dev->udev->speed == USB_SPEED_SUPER) {
2477 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2478 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2479 		dev->rx_qlen = 4;
2480 		dev->tx_qlen = 4;
2481 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2482 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2483 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2484 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2485 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2486 	} else {
2487 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2488 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2489 		dev->rx_qlen = 4;
2490 		dev->tx_qlen = 4;
2491 	}
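
	/* With DEFAULT_BURST_CAP_SIZE = 12 KiB, BURST_CAP is programmed to
	 * 12 at SuperSpeed (1024-byte packets), 24 at high speed (512) and
	 * 192 at full speed (64); the high-speed rx_qlen works out to
	 * (60 * 1518) / 12288 = 7 URBs.
	 */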
2492 
2493 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2494 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2495 
2496 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2497 	buf |= HW_CFG_MEF_;
2498 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2499 
2500 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2501 	buf |= USB_CFG_BCE_;
2502 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2503 
2504 	/* set FIFO sizes */
2505 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2506 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2507 
2508 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2509 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
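
	/* (12 * 1024 - 512) / 512 = 23 for both FIFOs: the end pointers
	 * are the index of the last 512-byte block of the 12 KiB FIFOs.
	 */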
2510 
2511 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2512 	ret = lan78xx_write_reg(dev, FLOW, 0);
2513 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2514 
2515 	/* Don't need rfe_ctl_lock during initialisation */
2516 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2517 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2518 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2519 
2520 	/* Enable or disable checksum offload engines */
2521 	lan78xx_set_features(dev->net, dev->net->features);
2522 
2523 	lan78xx_set_multicast(dev->net);
2524 
2525 	/* reset PHY */
2526 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2527 	buf |= PMT_CTL_PHY_RST_;
2528 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2529 
2530 	timeout = jiffies + HZ;
2531 	do {
2532 		mdelay(1);
2533 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2534 		if (time_after(jiffies, timeout)) {
2535 			netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
2536 			return -EIO;
2537 		}
2538 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2539 
2540 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2541 	/* LAN7801 only has RGMII mode */
2542 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2543 		buf &= ~MAC_CR_GMII_EN_;
2544 
2545 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2546 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2547 		if (!ret && sig != EEPROM_INDICATOR) {
2548 			/* No external EEPROM; enable MAC auto speed/duplex detection */
2549 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2550 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2551 		}
2552 	}
2553 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2554 
2555 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2556 	buf |= MAC_TX_TXEN_;
2557 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
2558 
2559 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2560 	buf |= FCT_TX_CTL_EN_;
2561 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2562 
2563 	ret = lan78xx_set_rx_max_frame_length(dev,
2564 					      dev->net->mtu + VLAN_ETH_HLEN);
2565 
2566 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2567 	buf |= MAC_RX_RXEN_;
2568 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2569 
2570 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2571 	buf |= FCT_RX_CTL_EN_;
2572 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2573 
2574 	return 0;
2575 }
2576 
2577 static void lan78xx_init_stats(struct lan78xx_net *dev)
2578 {
2579 	u32 *p;
2580 	int i;
2581 
2582 	/* initialize rollover thresholds for the stats update logic;
2583 	 * some hardware counters are 20 bits wide and some are 32 bits
2584 	 */
2585 	p = (u32 *)&dev->stats.rollover_max;
2586 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2587 		p[i] = 0xFFFFF;
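
	/* 0xFFFFF = 2^20 - 1, the wrap point of the 20-bit counters; the
	 * byte and EEE LPI counters below are full 32-bit and are
	 * overridden accordingly.
	 */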
2588 
2589 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2590 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2591 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2592 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2593 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2594 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2595 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2596 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2597 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2598 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2599 
2600 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2601 }
2602 
2603 static int lan78xx_open(struct net_device *net)
2604 {
2605 	struct lan78xx_net *dev = netdev_priv(net);
2606 	int ret;
2607 
2608 	ret = usb_autopm_get_interface(dev->intf);
2609 	if (ret < 0)
2610 		goto out;
2611 
2612 	phy_start(net->phydev);
2613 
2614 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2615 
2616 	/* for Link Check */
2617 	if (dev->urb_intr) {
2618 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2619 		if (ret < 0) {
2620 			netif_err(dev, ifup, dev->net,
2621 				  "intr submit %d\n", ret);
2622 			goto done;
2623 		}
2624 	}
2625 
2626 	lan78xx_init_stats(dev);
2627 
2628 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2629 
2630 	netif_start_queue(net);
2631 
2632 	dev->link_on = false;
2633 
2634 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2635 done:
2636 	usb_autopm_put_interface(dev->intf);
2637 
2638 out:
2639 	return ret;
2640 }
2641 
2642 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2643 {
2644 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2645 	DECLARE_WAITQUEUE(wait, current);
2646 	int temp;
2647 
2648 	/* ensure there are no more active urbs */
2649 	add_wait_queue(&unlink_wakeup, &wait);
2650 	set_current_state(TASK_UNINTERRUPTIBLE);
2651 	dev->wait = &unlink_wakeup;
2652 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2653 
2654 	/* maybe wait for deletions to finish. */
2655 	while (!skb_queue_empty(&dev->rxq) &&
2656 	       !skb_queue_empty(&dev->txq) &&
2657 	       !skb_queue_empty(&dev->done)) {
2658 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2659 		set_current_state(TASK_UNINTERRUPTIBLE);
2660 		netif_dbg(dev, ifdown, dev->net,
2661 			  "waited for %d urb completions\n", temp);
2662 	}
2663 	set_current_state(TASK_RUNNING);
2664 	dev->wait = NULL;
2665 	remove_wait_queue(&unlink_wakeup, &wait);
2666 }
2667 
2668 static int lan78xx_stop(struct net_device *net)
2669 {
2670 	struct lan78xx_net *dev = netdev_priv(net);
2671 
2672 	if (timer_pending(&dev->stat_monitor))
2673 		del_timer_sync(&dev->stat_monitor);
2674 
2675 	if (net->phydev)
2676 		phy_stop(net->phydev);
2677 
2678 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2679 	netif_stop_queue(net);
2680 
2681 	netif_info(dev, ifdown, dev->net,
2682 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2683 		   net->stats.rx_packets, net->stats.tx_packets,
2684 		   net->stats.rx_errors, net->stats.tx_errors);
2685 
2686 	lan78xx_terminate_urbs(dev);
2687 
2688 	usb_kill_urb(dev->urb_intr);
2689 
2690 	skb_queue_purge(&dev->rxq_pause);
2691 
2692 	/* deferred work (timer, tasklet, work queue) must also stop.
2693 	 * we can't flush_scheduled_work() until we drop rtnl (later),
2694 	 * else workers could deadlock; so make the workers a NOP.
2695 	 */
2696 	dev->flags = 0;
2697 	cancel_delayed_work_sync(&dev->wq);
2698 	tasklet_kill(&dev->bh);
2699 
2700 	usb_autopm_put_interface(dev->intf);
2701 
2702 	return 0;
2703 }
2704 
2705 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2706 				       struct sk_buff *skb, gfp_t flags)
2707 {
2708 	u32 tx_cmd_a, tx_cmd_b;
2709 	void *ptr;
2710 
2711 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2712 		dev_kfree_skb_any(skb);
2713 		return NULL;
2714 	}
2715 
2716 	if (skb_linearize(skb)) {
2717 		dev_kfree_skb_any(skb);
2718 		return NULL;
2719 	}
2720 
2721 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2722 
2723 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2724 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2725 
2726 	tx_cmd_b = 0;
2727 	if (skb_is_gso(skb)) {
2728 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2729 
2730 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2731 
2732 		tx_cmd_a |= TX_CMD_A_LSO_;
2733 	}
2734 
2735 	if (skb_vlan_tag_present(skb)) {
2736 		tx_cmd_a |= TX_CMD_A_IVTG_;
2737 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2738 	}
2739 
2740 	ptr = skb_push(skb, 8);
2741 	put_unaligned_le32(tx_cmd_a, ptr);
2742 	put_unaligned_le32(tx_cmd_b, ptr + 4);
2743 
2744 	return skb;
2745 }
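
/* Illustrative header for a plain 1514-byte frame with CHECKSUM_PARTIAL
 * (no VLAN tag, no GSO): tx_cmd_a = 1514 | TX_CMD_A_FCS_ | TX_CMD_A_IPE_ |
 * TX_CMD_A_TPE_ and tx_cmd_b = 0, stored little-endian in the 8 bytes
 * pushed in front of the Ethernet header.
 */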
2746 
2747 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2748 			       struct sk_buff_head *list, enum skb_state state)
2749 {
2750 	unsigned long flags;
2751 	enum skb_state old_state;
2752 	struct skb_data *entry = (struct skb_data *)skb->cb;
2753 
2754 	spin_lock_irqsave(&list->lock, flags);
2755 	old_state = entry->state;
2756 	entry->state = state;
2757 
2758 	__skb_unlink(skb, list);
2759 	spin_unlock(&list->lock);
2760 	spin_lock(&dev->done.lock);
2761 
2762 	__skb_queue_tail(&dev->done, skb);
2763 	if (skb_queue_len(&dev->done) == 1)
2764 		tasklet_schedule(&dev->bh);
2765 	spin_unlock_irqrestore(&dev->done.lock, flags);
2766 
2767 	return old_state;
2768 }
2769 
2770 static void tx_complete(struct urb *urb)
2771 {
2772 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2773 	struct skb_data *entry = (struct skb_data *)skb->cb;
2774 	struct lan78xx_net *dev = entry->dev;
2775 
2776 	if (urb->status == 0) {
2777 		dev->net->stats.tx_packets += entry->num_of_packet;
2778 		dev->net->stats.tx_bytes += entry->length;
2779 	} else {
2780 		dev->net->stats.tx_errors++;
2781 
2782 		switch (urb->status) {
2783 		case -EPIPE:
2784 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2785 			break;
2786 
2787 		/* software-driven interface shutdown */
2788 		case -ECONNRESET:
2789 		case -ESHUTDOWN:
2790 			break;
2791 
2792 		case -EPROTO:
2793 		case -ETIME:
2794 		case -EILSEQ:
2795 			netif_stop_queue(dev->net);
2796 			break;
2797 		default:
2798 			netif_dbg(dev, tx_err, dev->net,
2799 				  "tx err %d\n", entry->urb->status);
2800 			break;
2801 		}
2802 	}
2803 
2804 	usb_autopm_put_interface_async(dev->intf);
2805 
2806 	defer_bh(dev, skb, &dev->txq, tx_done);
2807 }
2808 
2809 static void lan78xx_queue_skb(struct sk_buff_head *list,
2810 			      struct sk_buff *newsk, enum skb_state state)
2811 {
2812 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2813 
2814 	__skb_queue_tail(list, newsk);
2815 	entry->state = state;
2816 }
2817 
2818 static netdev_tx_t
2819 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2820 {
2821 	struct lan78xx_net *dev = netdev_priv(net);
2822 	struct sk_buff *skb2 = NULL;
2823 
2824 	if (skb) {
2825 		skb_tx_timestamp(skb);
2826 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2827 	}
2828 
2829 	if (skb2) {
2830 		skb_queue_tail(&dev->txq_pend, skb2);
2831 
2832 		/* throttle the TX path at speeds below SuperSpeed USB */
2833 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2834 		    (skb_queue_len(&dev->txq_pend) > 10))
2835 			netif_stop_queue(net);
2836 	} else {
2837 		netif_dbg(dev, tx_err, dev->net,
2838 			  "lan78xx_tx_prep return NULL\n");
2839 		dev->net->stats.tx_errors++;
2840 		dev->net->stats.tx_dropped++;
2841 	}
2842 
2843 	tasklet_schedule(&dev->bh);
2844 
2845 	return NETDEV_TX_OK;
2846 }
2847 
2848 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2849 {
2850 	struct lan78xx_priv *pdata = NULL;
2851 	int ret;
2852 	int i;
2853 
2854 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2855 
2856 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2857 	if (!pdata) {
2858 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2859 		return -ENOMEM;
2860 	}
2861 
2862 	pdata->dev = dev;
2863 
2864 	spin_lock_init(&pdata->rfe_ctl_lock);
2865 	mutex_init(&pdata->dataport_mutex);
2866 
2867 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2868 
2869 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2870 		pdata->vlan_table[i] = 0;
2871 
2872 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2873 
2874 	dev->net->features = 0;
2875 
2876 	if (DEFAULT_TX_CSUM_ENABLE)
2877 		dev->net->features |= NETIF_F_HW_CSUM;
2878 
2879 	if (DEFAULT_RX_CSUM_ENABLE)
2880 		dev->net->features |= NETIF_F_RXCSUM;
2881 
2882 	if (DEFAULT_TSO_CSUM_ENABLE)
2883 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2884 
2885 	if (DEFAULT_VLAN_RX_OFFLOAD)
2886 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2887 
2888 	if (DEFAULT_VLAN_FILTER_ENABLE)
2889 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2890 
2891 	dev->net->hw_features = dev->net->features;
2892 
2893 	ret = lan78xx_setup_irq_domain(dev);
2894 	if (ret < 0) {
2895 		netdev_warn(dev->net,
2896 			    "lan78xx_setup_irq_domain() failed : %d", ret);
2897 		goto out1;
2898 	}
2899 
2900 	dev->net->hard_header_len += TX_OVERHEAD;
2901 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2902 
2903 	/* Init all registers */
2904 	ret = lan78xx_reset(dev);
2905 	if (ret) {
2906 		netdev_warn(dev->net, "register initialization failed\n");
2907 		goto out2;
2908 	}
2909 
2910 	ret = lan78xx_mdio_init(dev);
2911 	if (ret) {
2912 		netdev_warn(dev->net, "MDIO initialization failed\n");
2913 		goto out2;
2914 	}
2915 
2916 	dev->net->flags |= IFF_MULTICAST;
2917 
2918 	pdata->wol = WAKE_MAGIC;
2919 
2920 	return ret;
2921 
2922 out2:
2923 	lan78xx_remove_irq_domain(dev);
2924 
2925 out1:
2926 	netdev_warn(dev->net, "bind routine failed\n");
2927 	cancel_work_sync(&pdata->set_multicast);
2928 	cancel_work_sync(&pdata->set_vlan);
2929 	kfree(pdata);
2930 	return ret;
2931 }
2932 
2933 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2934 {
2935 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2936 
2937 	lan78xx_remove_irq_domain(dev);
2938 
2939 	lan78xx_remove_mdio(dev);
2940 
2941 	if (pdata) {
2942 		cancel_work_sync(&pdata->set_multicast);
2943 		cancel_work_sync(&pdata->set_vlan);
2944 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2945 		kfree(pdata);
2946 		pdata = NULL;
2947 		dev->data[0] = 0;
2948 	}
2949 }
2950 
2951 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2952 				    struct sk_buff *skb,
2953 				    u32 rx_cmd_a, u32 rx_cmd_b)
2954 {
2955 	/* HW Checksum offload appears to be flawed if used when not stripping
2956 	 * VLAN headers. Drop back to S/W checksums under these conditions.
2957 	 */
2958 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2959 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
2960 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
2961 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
2962 		skb->ip_summed = CHECKSUM_NONE;
2963 	} else {
2964 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2965 		skb->ip_summed = CHECKSUM_COMPLETE;
2966 	}
2967 }
2968 
2969 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
2970 				    struct sk_buff *skb,
2971 				    u32 rx_cmd_a, u32 rx_cmd_b)
2972 {
2973 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2974 	    (rx_cmd_a & RX_CMD_A_FVTG_))
2975 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2976 				       (rx_cmd_b & 0xffff));
2977 }
2978 
2979 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2980 {
2981 	int status;
2982 
2983 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2984 		skb_queue_tail(&dev->rxq_pause, skb);
2985 		return;
2986 	}
2987 
2988 	dev->net->stats.rx_packets++;
2989 	dev->net->stats.rx_bytes += skb->len;
2990 
2991 	skb->protocol = eth_type_trans(skb, dev->net);
2992 
2993 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2994 		  skb->len + sizeof(struct ethhdr), skb->protocol);
2995 	memset(skb->cb, 0, sizeof(struct skb_data));
2996 
2997 	if (skb_defer_rx_timestamp(skb))
2998 		return;
2999 
3000 	status = netif_rx(skb);
3001 	if (status != NET_RX_SUCCESS)
3002 		netif_dbg(dev, rx_err, dev->net,
3003 			  "netif_rx status %d\n", status);
3004 }
3005 
3006 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3007 {
3008 	if (skb->len < dev->net->hard_header_len)
3009 		return 0;
3010 
3011 	while (skb->len > 0) {
3012 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3013 		u16 rx_cmd_c;
3014 		struct sk_buff *skb2;
3015 		unsigned char *packet;
3016 
3017 		rx_cmd_a = get_unaligned_le32(skb->data);
3018 		skb_pull(skb, sizeof(rx_cmd_a));
3019 
3020 		rx_cmd_b = get_unaligned_le32(skb->data);
3021 		skb_pull(skb, sizeof(rx_cmd_b));
3022 
3023 		rx_cmd_c = get_unaligned_le16(skb->data);
3024 		skb_pull(skb, sizeof(rx_cmd_c));
3025 
3026 		packet = skb->data;
3027 
3028 		/* get the packet length */
3029 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3030 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
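		/* e.g. a 60-byte frame: (4 - ((60 + 2) % 4)) % 4 = 2 pad
		 * bytes, keeping each following RX command word 4-byte
		 * aligned.
		 */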
3031 
3032 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3033 			netif_dbg(dev, rx_err, dev->net,
3034 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3035 		} else {
3036 			/* last frame in this batch */
3037 			if (skb->len == size) {
3038 				lan78xx_rx_csum_offload(dev, skb,
3039 							rx_cmd_a, rx_cmd_b);
3040 				lan78xx_rx_vlan_offload(dev, skb,
3041 							rx_cmd_a, rx_cmd_b);
3042 
3043 				skb_trim(skb, skb->len - 4); /* remove fcs */
3044 				skb->truesize = size + sizeof(struct sk_buff);
3045 
3046 				return 1;
3047 			}
3048 
3049 			skb2 = skb_clone(skb, GFP_ATOMIC);
3050 			if (unlikely(!skb2)) {
3051 				netdev_warn(dev->net, "Error allocating skb");
3052 				return 0;
3053 			}
3054 
3055 			skb2->len = size;
3056 			skb2->data = packet;
3057 			skb_set_tail_pointer(skb2, size);
3058 
3059 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3060 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3061 
3062 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
3063 			skb2->truesize = size + sizeof(struct sk_buff);
3064 
3065 			lan78xx_skb_return(dev, skb2);
3066 		}
3067 
3068 		skb_pull(skb, size);
3069 
3070 		/* padding bytes before the next frame starts */
3071 		if (skb->len)
3072 			skb_pull(skb, align_count);
3073 	}
3074 
3075 	return 1;
3076 }
3077 
3078 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3079 {
3080 	if (!lan78xx_rx(dev, skb)) {
3081 		dev->net->stats.rx_errors++;
3082 		goto done;
3083 	}
3084 
3085 	if (skb->len) {
3086 		lan78xx_skb_return(dev, skb);
3087 		return;
3088 	}
3089 
3090 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3091 	dev->net->stats.rx_errors++;
3092 done:
3093 	skb_queue_tail(&dev->done, skb);
3094 }
3095 
3096 static void rx_complete(struct urb *urb);
3097 
3098 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3099 {
3100 	struct sk_buff *skb;
3101 	struct skb_data *entry;
3102 	unsigned long lockflags;
3103 	size_t size = dev->rx_urb_size;
3104 	int ret = 0;
3105 
3106 	skb = netdev_alloc_skb_ip_align(dev->net, size);
3107 	if (!skb) {
3108 		usb_free_urb(urb);
3109 		return -ENOMEM;
3110 	}
3111 
3112 	entry = (struct skb_data *)skb->cb;
3113 	entry->urb = urb;
3114 	entry->dev = dev;
3115 	entry->length = 0;
3116 
3117 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3118 			  skb->data, size, rx_complete, skb);
3119 
3120 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3121 
3122 	if (netif_device_present(dev->net) &&
3123 	    netif_running(dev->net) &&
3124 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3125 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3126 		ret = usb_submit_urb(urb, GFP_ATOMIC);
3127 		switch (ret) {
3128 		case 0:
3129 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3130 			break;
3131 		case -EPIPE:
3132 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3133 			break;
3134 		case -ENODEV:
3135 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3136 			netif_device_detach(dev->net);
3137 			break;
3138 		case -EHOSTUNREACH:
3139 			ret = -ENOLINK;
3140 			break;
3141 		default:
3142 			netif_dbg(dev, rx_err, dev->net,
3143 				  "rx submit, %d\n", ret);
3144 			tasklet_schedule(&dev->bh);
3145 		}
3146 	} else {
3147 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3148 		ret = -ENOLINK;
3149 	}
3150 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3151 	if (ret) {
3152 		dev_kfree_skb_any(skb);
3153 		usb_free_urb(urb);
3154 	}
3155 	return ret;
3156 }
3157 
3158 static void rx_complete(struct urb *urb)
3159 {
3160 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3161 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3162 	struct lan78xx_net *dev = entry->dev;
3163 	int urb_status = urb->status;
3164 	enum skb_state state;
3165 
3166 	skb_put(skb, urb->actual_length);
3167 	state = rx_done;
3168 	entry->urb = NULL;
3169 
3170 	switch (urb_status) {
3171 	case 0:
3172 		if (skb->len < dev->net->hard_header_len) {
3173 			state = rx_cleanup;
3174 			dev->net->stats.rx_errors++;
3175 			dev->net->stats.rx_length_errors++;
3176 			netif_dbg(dev, rx_err, dev->net,
3177 				  "rx length %d\n", skb->len);
3178 		}
3179 		usb_mark_last_busy(dev->udev);
3180 		break;
3181 	case -EPIPE:
3182 		dev->net->stats.rx_errors++;
3183 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3184 		fallthrough;
3185 	case -ECONNRESET:				/* async unlink */
3186 	case -ESHUTDOWN:				/* hardware gone */
3187 		netif_dbg(dev, ifdown, dev->net,
3188 			  "rx shutdown, code %d\n", urb_status);
3189 		state = rx_cleanup;
3190 		entry->urb = urb;
3191 		urb = NULL;
3192 		break;
3193 	case -EPROTO:
3194 	case -ETIME:
3195 	case -EILSEQ:
3196 		dev->net->stats.rx_errors++;
3197 		state = rx_cleanup;
3198 		entry->urb = urb;
3199 		urb = NULL;
3200 		break;
3201 
3202 	/* data overrun ... flush fifo? */
3203 	case -EOVERFLOW:
3204 		dev->net->stats.rx_over_errors++;
3205 		fallthrough;
3206 
3207 	default:
3208 		state = rx_cleanup;
3209 		dev->net->stats.rx_errors++;
3210 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3211 		break;
3212 	}
3213 
3214 	state = defer_bh(dev, skb, &dev->rxq, state);
3215 
3216 	if (urb) {
3217 		if (netif_running(dev->net) &&
3218 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3219 		    state != unlink_start) {
3220 			rx_submit(dev, urb, GFP_ATOMIC);
3221 			return;
3222 		}
3223 		usb_free_urb(urb);
3224 	}
3225 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3226 }
3227 
3228 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3229 {
3230 	int length;
3231 	struct urb *urb = NULL;
3232 	struct skb_data *entry;
3233 	unsigned long flags;
3234 	struct sk_buff_head *tqp = &dev->txq_pend;
3235 	struct sk_buff *skb, *skb2;
3236 	int ret;
3237 	int count, pos;
3238 	int skb_totallen, pkt_cnt;
3239 
3240 	skb_totallen = 0;
3241 	pkt_cnt = 0;
3242 	count = 0;
3243 	length = 0;
3244 	spin_lock_irqsave(&tqp->lock, flags);
3245 	skb_queue_walk(tqp, skb) {
3246 		if (skb_is_gso(skb)) {
3247 			if (!skb_queue_is_first(tqp, skb)) {
3248 				/* handle previous packets first */
3249 				break;
3250 			}
3251 			count = 1;
3252 			length = skb->len - TX_OVERHEAD;
3253 			__skb_unlink(skb, tqp);
3254 			spin_unlock_irqrestore(&tqp->lock, flags);
3255 			goto gso_skb;
3256 		}
3257 
3258 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3259 			break;
3260 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
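		/* each copied buffer starts 4-byte aligned: e.g. pending
		 * lengths of 1522 then 60 give offsets 0 and 1524 and a
		 * total allocation of 1584 bytes.
		 */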
3261 		pkt_cnt++;
3262 	}
3263 	spin_unlock_irqrestore(&tqp->lock, flags);
3264 
3265 	/* copy to a single skb */
3266 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3267 	if (!skb)
3268 		goto drop;
3269 
3270 	skb_put(skb, skb_totallen);
3271 
3272 	for (count = pos = 0; count < pkt_cnt; count++) {
3273 		skb2 = skb_dequeue(tqp);
3274 		if (skb2) {
3275 			length += (skb2->len - TX_OVERHEAD);
3276 			memcpy(skb->data + pos, skb2->data, skb2->len);
3277 			pos += roundup(skb2->len, sizeof(u32));
3278 			dev_kfree_skb(skb2);
3279 		}
3280 	}
3281 
3282 gso_skb:
3283 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3284 	if (!urb)
3285 		goto drop;
3286 
3287 	entry = (struct skb_data *)skb->cb;
3288 	entry->urb = urb;
3289 	entry->dev = dev;
3290 	entry->length = length;
3291 	entry->num_of_packet = count;
3292 
3293 	spin_lock_irqsave(&dev->txq.lock, flags);
3294 	ret = usb_autopm_get_interface_async(dev->intf);
3295 	if (ret < 0) {
3296 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3297 		goto drop;
3298 	}
3299 
3300 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3301 			  skb->data, skb->len, tx_complete, skb);
3302 
3303 	if (length % dev->maxpacket == 0) {
3304 		/* send USB_ZERO_PACKET */
3305 		urb->transfer_flags |= URB_ZERO_PACKET;
3306 	}
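
	/* URB_ZERO_PACKET makes the host controller append a zero-length
	 * packet so the device can detect the end of a transfer whose
	 * size is an exact multiple of maxpacket (512 bytes at high
	 * speed).
	 */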
3307 
3308 #ifdef CONFIG_PM
3309 	/* if this triggers, the device is still asleep */
3310 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3311 		/* transmission will be done in resume */
3312 		usb_anchor_urb(urb, &dev->deferred);
3313 		/* no point in processing more packets */
3314 		netif_stop_queue(dev->net);
3315 		usb_put_urb(urb);
3316 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3317 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3318 		return;
3319 	}
3320 #endif
3321 
3322 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3323 	switch (ret) {
3324 	case 0:
3325 		netif_trans_update(dev->net);
3326 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3327 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3328 			netif_stop_queue(dev->net);
3329 		break;
3330 	case -EPIPE:
3331 		netif_stop_queue(dev->net);
3332 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3333 		usb_autopm_put_interface_async(dev->intf);
3334 		break;
3335 	default:
3336 		usb_autopm_put_interface_async(dev->intf);
3337 		netif_dbg(dev, tx_err, dev->net,
3338 			  "tx: submit urb err %d\n", ret);
3339 		break;
3340 	}
3341 
3342 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3343 
3344 	if (ret) {
3345 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3346 drop:
3347 		dev->net->stats.tx_dropped++;
3348 		if (skb)
3349 			dev_kfree_skb_any(skb);
3350 		usb_free_urb(urb);
3351 	} else
3352 		netif_dbg(dev, tx_queued, dev->net,
3353 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3354 }
3355 
3356 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3357 {
3358 	struct urb *urb;
3359 	int i;
3360 
3361 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3362 		for (i = 0; i < 10; i++) {
3363 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3364 				break;
3365 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3366 			if (urb)
3367 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3368 					return;
3369 		}
3370 
3371 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3372 			tasklet_schedule(&dev->bh);
3373 	}
3374 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3375 		netif_wake_queue(dev->net);
3376 }
3377 
3378 static void lan78xx_bh(struct tasklet_struct *t)
3379 {
3380 	struct lan78xx_net *dev = from_tasklet(dev, t, bh);
3381 	struct sk_buff *skb;
3382 	struct skb_data *entry;
3383 
3384 	while ((skb = skb_dequeue(&dev->done))) {
3385 		entry = (struct skb_data *)(skb->cb);
3386 		switch (entry->state) {
3387 		case rx_done:
3388 			entry->state = rx_cleanup;
3389 			rx_process(dev, skb);
3390 			continue;
3391 		case tx_done:
3392 			usb_free_urb(entry->urb);
3393 			dev_kfree_skb(skb);
3394 			continue;
3395 		case rx_cleanup:
3396 			usb_free_urb(entry->urb);
3397 			dev_kfree_skb(skb);
3398 			continue;
3399 		default:
3400 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3401 			return;
3402 		}
3403 	}
3404 
3405 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3406 		/* reset update timer delta */
3407 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3408 			dev->delta = 1;
3409 			mod_timer(&dev->stat_monitor,
3410 				  jiffies + STAT_UPDATE_TIMER);
3411 		}
3412 
3413 		if (!skb_queue_empty(&dev->txq_pend))
3414 			lan78xx_tx_bh(dev);
3415 
3416 		if (!timer_pending(&dev->delay) &&
3417 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3418 			lan78xx_rx_bh(dev);
3419 	}
3420 }
3421 
3422 static void lan78xx_delayedwork(struct work_struct *work)
3423 {
3424 	int status;
3425 	struct lan78xx_net *dev;
3426 
3427 	dev = container_of(work, struct lan78xx_net, wq.work);
3428 
3429 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3430 		unlink_urbs(dev, &dev->txq);
3431 		status = usb_autopm_get_interface(dev->intf);
3432 		if (status < 0)
3433 			goto fail_pipe;
3434 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3435 		usb_autopm_put_interface(dev->intf);
3436 		if (status < 0 &&
3437 		    status != -EPIPE &&
3438 		    status != -ESHUTDOWN) {
3439 			if (netif_msg_tx_err(dev))
3440 fail_pipe:
3441 				netdev_err(dev->net,
3442 					   "can't clear tx halt, status %d\n",
3443 					   status);
3444 		} else {
3445 			clear_bit(EVENT_TX_HALT, &dev->flags);
3446 			if (status != -ESHUTDOWN)
3447 				netif_wake_queue(dev->net);
3448 		}
3449 	}
3450 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3451 		unlink_urbs(dev, &dev->rxq);
3452 		status = usb_autopm_get_interface(dev->intf);
3453 		if (status < 0)
3454 			goto fail_halt;
3455 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3456 		usb_autopm_put_interface(dev->intf);
3457 		if (status < 0 &&
3458 		    status != -EPIPE &&
3459 		    status != -ESHUTDOWN) {
3460 			if (netif_msg_rx_err(dev))
3461 fail_halt:
3462 				netdev_err(dev->net,
3463 					   "can't clear rx halt, status %d\n",
3464 					   status);
3465 		} else {
3466 			clear_bit(EVENT_RX_HALT, &dev->flags);
3467 			tasklet_schedule(&dev->bh);
3468 		}
3469 	}
3470 
3471 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3472 		int ret = 0;
3473 
3474 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3475 		status = usb_autopm_get_interface(dev->intf);
3476 		if (status < 0)
3477 			goto skip_reset;
3478 		if (lan78xx_link_reset(dev) < 0) {
3479 			usb_autopm_put_interface(dev->intf);
3480 skip_reset:
3481 			netdev_info(dev->net, "link reset failed (%d)\n",
3482 				    ret);
3483 		} else {
3484 			usb_autopm_put_interface(dev->intf);
3485 		}
3486 	}
3487 
3488 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3489 		lan78xx_update_stats(dev);
3490 
3491 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3492 
3493 		mod_timer(&dev->stat_monitor,
3494 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3495 
3496 		dev->delta = min((dev->delta * 2), 50);
3497 	}
3498 }
3499 
3500 static void intr_complete(struct urb *urb)
3501 {
3502 	struct lan78xx_net *dev = urb->context;
3503 	int status = urb->status;
3504 
3505 	switch (status) {
3506 	/* success */
3507 	case 0:
3508 		lan78xx_status(dev, urb);
3509 		break;
3510 
3511 	/* software-driven interface shutdown */
3512 	case -ENOENT:			/* urb killed */
3513 	case -ESHUTDOWN:		/* hardware gone */
3514 		netif_dbg(dev, ifdown, dev->net,
3515 			  "intr shutdown, code %d\n", status);
3516 		return;
3517 
3518 	/* NOTE:  not throttling like RX/TX, since this endpoint
3519 	 * already polls infrequently
3520 	 */
3521 	default:
3522 		netdev_dbg(dev->net, "intr status %d\n", status);
3523 		break;
3524 	}
3525 
3526 	if (!netif_running(dev->net))
3527 		return;
3528 
3529 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3530 	status = usb_submit_urb(urb, GFP_ATOMIC);
3531 	if (status != 0)
3532 		netif_err(dev, timer, dev->net,
3533 			  "intr resubmit --> %d\n", status);
3534 }
3535 
3536 static void lan78xx_disconnect(struct usb_interface *intf)
3537 {
3538 	struct lan78xx_net *dev;
3539 	struct usb_device *udev;
3540 	struct net_device *net;
3541 	struct phy_device *phydev;
3542 
3543 	dev = usb_get_intfdata(intf);
3544 	usb_set_intfdata(intf, NULL);
3545 	if (!dev)
3546 		return;
3547 
3548 	udev = interface_to_usbdev(intf);
3549 	net = dev->net;
3550 	phydev = net->phydev;
3551 
3552 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3553 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3554 
3555 	phy_disconnect(net->phydev);
3556 
3557 	if (phy_is_pseudo_fixed_link(phydev))
3558 		fixed_phy_unregister(phydev);
3559 
3560 	unregister_netdev(net);
3561 
3562 	cancel_delayed_work_sync(&dev->wq);
3563 
3564 	usb_scuttle_anchored_urbs(&dev->deferred);
3565 
3566 	lan78xx_unbind(dev, intf);
3567 
3568 	usb_kill_urb(dev->urb_intr);
3569 	usb_free_urb(dev->urb_intr);
3570 
3571 	free_netdev(net);
3572 	usb_put_dev(udev);
3573 }
3574 
3575 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
3576 {
3577 	struct lan78xx_net *dev = netdev_priv(net);
3578 
3579 	unlink_urbs(dev, &dev->txq);
3580 	tasklet_schedule(&dev->bh);
3581 }
3582 
3583 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3584 						struct net_device *netdev,
3585 						netdev_features_t features)
3586 {
3587 	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3588 		features &= ~NETIF_F_GSO_MASK;
3589 
3590 	features = vlan_features_check(skb, features);
3591 	features = vxlan_features_check(skb, features);
3592 
3593 	return features;
3594 }
3595 
3596 static const struct net_device_ops lan78xx_netdev_ops = {
3597 	.ndo_open		= lan78xx_open,
3598 	.ndo_stop		= lan78xx_stop,
3599 	.ndo_start_xmit		= lan78xx_start_xmit,
3600 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3601 	.ndo_change_mtu		= lan78xx_change_mtu,
3602 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3603 	.ndo_validate_addr	= eth_validate_addr,
3604 	.ndo_do_ioctl		= phy_do_ioctl_running,
3605 	.ndo_set_rx_mode	= lan78xx_set_multicast,
3606 	.ndo_set_features	= lan78xx_set_features,
3607 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
3608 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
3609 	.ndo_features_check	= lan78xx_features_check,
3610 };
3611 
3612 static void lan78xx_stat_monitor(struct timer_list *t)
3613 {
3614 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3615 
3616 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3617 }
3618 
3619 static int lan78xx_probe(struct usb_interface *intf,
3620 			 const struct usb_device_id *id)
3621 {
3622 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3623 	struct lan78xx_net *dev;
3624 	struct net_device *netdev;
3625 	struct usb_device *udev;
3626 	int ret;
3627 	unsigned maxp;
3628 	unsigned period;
3629 	u8 *buf = NULL;
3630 
3631 	udev = interface_to_usbdev(intf);
3632 	udev = usb_get_dev(udev);
3633 
3634 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3635 	if (!netdev) {
3636 		dev_err(&intf->dev, "Error: OOM\n");
3637 		ret = -ENOMEM;
3638 		goto out1;
3639 	}
3640 
3641 	/* netdev_printk() needs this */
3642 	SET_NETDEV_DEV(netdev, &intf->dev);
3643 
3644 	dev = netdev_priv(netdev);
3645 	dev->udev = udev;
3646 	dev->intf = intf;
3647 	dev->net = netdev;
3648 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3649 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3650 
3651 	skb_queue_head_init(&dev->rxq);
3652 	skb_queue_head_init(&dev->txq);
3653 	skb_queue_head_init(&dev->done);
3654 	skb_queue_head_init(&dev->rxq_pause);
3655 	skb_queue_head_init(&dev->txq_pend);
3656 	mutex_init(&dev->phy_mutex);
3657 
3658 	tasklet_setup(&dev->bh, lan78xx_bh);
3659 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3660 	init_usb_anchor(&dev->deferred);
3661 
3662 	netdev->netdev_ops = &lan78xx_netdev_ops;
3663 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3664 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3665 
3666 	dev->delta = 1;
3667 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3668 
3669 	mutex_init(&dev->stats.access_lock);
3670 
3671 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3672 		ret = -ENODEV;
3673 		goto out2;
3674 	}
3675 
3676 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3677 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3678 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3679 		ret = -ENODEV;
3680 		goto out2;
3681 	}
3682 
3683 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3684 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3685 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3686 		ret = -ENODEV;
3687 		goto out2;
3688 	}
3689 
3690 	ep_intr = &intf->cur_altsetting->endpoint[2];
3691 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3692 		ret = -ENODEV;
3693 		goto out2;
3694 	}
3695 
3696 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3697 					usb_endpoint_num(&ep_intr->desc));
3698 
3699 	ret = lan78xx_bind(dev, intf);
3700 	if (ret < 0)
3701 		goto out2;
3702 
3703 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3704 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3705 
3706 	/* MTU range: 68 - 9000 */
3707 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3708 	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3709 
3710 	period = ep_intr->desc.bInterval;
3711 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3712 	buf = kmalloc(maxp, GFP_KERNEL);
3713 	if (buf) {
3714 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3715 		if (!dev->urb_intr) {
3716 			ret = -ENOMEM;
3717 			kfree(buf);
3718 			goto out3;
3719 		} else {
3720 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3721 					 dev->pipe_intr, buf, maxp,
3722 					 intr_complete, dev, period);
3723 			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3724 		}
3725 	}
3726 
3727 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3728 
3729 	/* driver requires remote-wakeup capability during autosuspend. */
3730 	intf->needs_remote_wakeup = 1;
3731 
3732 	ret = lan78xx_phy_init(dev);
3733 	if (ret < 0)
3734 		goto out4;
3735 
3736 	ret = register_netdev(netdev);
3737 	if (ret != 0) {
3738 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3739 		goto out5;
3740 	}
3741 
3742 	usb_set_intfdata(intf, dev);
3743 
3744 	ret = device_set_wakeup_enable(&udev->dev, true);
3745 
3746 	/* The default autosuspend delay of 2 s has more overhead than
3747 	 * benefit, so use 10 s instead.
3748 	 */
3749 	pm_runtime_set_autosuspend_delay(&udev->dev,
3750 					 DEFAULT_AUTOSUSPEND_DELAY);
3751 
3752 	return 0;
3753 
3754 out5:
3755 	phy_disconnect(netdev->phydev);
3756 out4:
3757 	usb_free_urb(dev->urb_intr);
3758 out3:
3759 	lan78xx_unbind(dev, intf);
3760 out2:
3761 	free_netdev(netdev);
3762 out1:
3763 	usb_put_dev(udev);
3764 
3765 	return ret;
3766 }
3767 
3768 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3769 {
3770 	const u16 crc16poly = 0x8005;
3771 	int i;
3772 	u16 bit, crc, msb;
3773 	u8 data;
3774 
3775 	crc = 0xFFFF;
3776 	for (i = 0; i < len; i++) {
3777 		data = *buf++;
3778 		for (bit = 0; bit < 8; bit++) {
3779 			msb = crc >> 15;
3780 			crc <<= 1;
3781 
3782 			if (msb ^ (u16)(data & 1)) {
3783 				crc ^= crc16poly;
3784 				crc |= (u16)0x0001U;
3785 			}
3786 			data >>= 1;
3787 		}
3788 	}
3789 
3790 	return crc;
3791 }
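
/* CRC-16 with polynomial 0x8005, initial value 0xFFFF and LSB-first data;
 * the result is programmed into WUF_CFG below so the hardware can match
 * wake-up frames against the bytes selected by the WUF_MASKx registers.
 */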
3792 
3793 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3794 {
3795 	u32 buf;
3796 	int mask_index;
3797 	u16 crc;
3798 	u32 temp_wucsr;
3799 	u32 temp_pmt_ctl;
3800 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3801 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3802 	const u8 arp_type[2] = { 0x08, 0x06 };
3803 
3804 	lan78xx_read_reg(dev, MAC_TX, &buf);
3805 	buf &= ~MAC_TX_TXEN_;
3806 	lan78xx_write_reg(dev, MAC_TX, buf);
3807 	lan78xx_read_reg(dev, MAC_RX, &buf);
3808 	buf &= ~MAC_RX_RXEN_;
3809 	lan78xx_write_reg(dev, MAC_RX, buf);
3810 
3811 	lan78xx_write_reg(dev, WUCSR, 0);
3812 	lan78xx_write_reg(dev, WUCSR2, 0);
3813 	lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3814 
3815 	temp_wucsr = 0;
3816 
3817 	temp_pmt_ctl = 0;
3818 	lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3819 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3820 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3821 
3822 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3823 		lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3824 
3825 	mask_index = 0;
3826 	if (wol & WAKE_PHY) {
3827 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3828 
3829 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3830 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3831 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3832 	}
3833 	if (wol & WAKE_MAGIC) {
3834 		temp_wucsr |= WUCSR_MPEN_;
3835 
3836 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3837 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3838 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3839 	}
3840 	if (wol & WAKE_BCAST) {
3841 		temp_wucsr |= WUCSR_BCST_EN_;
3842 
3843 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3844 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3845 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3846 	}
3847 	if (wol & WAKE_MCAST) {
3848 		temp_wucsr |= WUCSR_WAKE_EN_;
3849 
3850 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3851 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3852 		lan78xx_write_reg(dev, WUF_CFG(mask_index),
3853 					WUF_CFGX_EN_ |
3854 					WUF_CFGX_TYPE_MCAST_ |
3855 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3856 					(crc & WUF_CFGX_CRC16_MASK_));
3857 
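		/* WUF_MASKn is a byte-enable bitmap: 0x7 selects frame bytes
		 * 0-2, the 01:00:5e IPv4 multicast prefix CRC'd above
		 */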
3858 		lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3859 		lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3860 		lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3861 		lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3862 		mask_index++;
3863 
3864 		/* for IPv6 Multicast */
3865 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3866 		lan78xx_write_reg(dev, WUF_CFG(mask_index),
3867 					WUF_CFGX_EN_ |
3868 					WUF_CFGX_TYPE_MCAST_ |
3869 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3870 					(crc & WUF_CFGX_CRC16_MASK_));
3871 
3872 		lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3873 		lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3874 		lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3875 		lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3876 		mask_index++;
3877 
3878 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3879 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3880 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3881 	}
3882 	if (wol & WAKE_UCAST) {
3883 		temp_wucsr |= WUCSR_PFDA_EN_;
3884 
3885 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3886 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3887 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3888 	}
3889 	if (wol & WAKE_ARP) {
3890 		temp_wucsr |= WUCSR_WAKE_EN_;
3891 
3892 		/* set WUF_CFG & WUF_MASK
3893 		 * for packettype (offset 12,13) = ARP (0x0806)
3894 		 */
3895 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3896 		lan78xx_write_reg(dev, WUF_CFG(mask_index),
3897 					WUF_CFGX_EN_ |
3898 					WUF_CFGX_TYPE_ALL_ |
3899 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3900 					(crc & WUF_CFGX_CRC16_MASK_));
3901 
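		/* mask 0x3000 selects frame bytes 12-13 (the EtherType), so
		 * the filter matches the ARP type CRC'd above
		 */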
3902 		lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3903 		lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3904 		lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3905 		lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3906 		mask_index++;
3907 
3908 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3909 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3910 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3911 	}
3912 
3913 	lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3914 
3915	/* multiple wake events: use suspend mode 0, which covers them all */
3916 	if (hweight_long((unsigned long)wol) > 1) {
3917 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3918 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3919 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3920 	}
3921 	lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3922 
3923	/* clear pending wakeup status (write-one-to-clear) */
3924 	lan78xx_read_reg(dev, PMT_CTL, &buf);
3925 	buf |= PMT_CTL_WUPS_MASK_;
3926 	lan78xx_write_reg(dev, PMT_CTL, buf);
3927 
3928 	lan78xx_read_reg(dev, MAC_RX, &buf);
3929 	buf |= MAC_RX_RXEN_;
3930 	lan78xx_write_reg(dev, MAC_RX, buf);
3931 
3932 	return 0;
3933 }
3934 
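/* Suspend handler for both autosuspend and system sleep: refuse to
 * autosuspend while TX is pending, quiesce the MAC and URBs, then arm
 * good-frame wakeup (autosuspend) or the configured WoL events (system
 * sleep) before the bus enters suspend.
 */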
3935 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3936 {
3937 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3938 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3939 	u32 buf;
3940 	int ret;
3941 
3942 	if (!dev->suspend_count++) {
3943 		spin_lock_irq(&dev->txq.lock);
3944 		/* don't autosuspend while transmitting */
3945		if ((skb_queue_len(&dev->txq) ||
3946		     skb_queue_len(&dev->txq_pend)) &&
3947		    PMSG_IS_AUTO(message)) {
3948			spin_unlock_irq(&dev->txq.lock);
3949			ret = -EBUSY;
3950			goto out;
3951		}
3952
3953		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3954		spin_unlock_irq(&dev->txq.lock);
3955 
3956 		/* stop TX & RX */
3957 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3958 		buf &= ~MAC_TX_TXEN_;
3959 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
3960 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3961 		buf &= ~MAC_RX_RXEN_;
3962 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
3963 
3964		/* empty out the RX and TX queues */
3965 		netif_device_detach(dev->net);
3966 		lan78xx_terminate_urbs(dev);
3967 		usb_kill_urb(dev->urb_intr);
3968 
3969 		/* reattach */
3970 		netif_device_attach(dev->net);
3971 	}
3972 
3973 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3974 		del_timer(&dev->stat_monitor);
3975 
3976 		if (PMSG_IS_AUTO(message)) {
3977 			/* auto suspend (selective suspend) */
3978 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3979 			buf &= ~MAC_TX_TXEN_;
3980 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
3981 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3982 			buf &= ~MAC_RX_RXEN_;
3983 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3984 
3985 			ret = lan78xx_write_reg(dev, WUCSR, 0);
3986 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
3987 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3988 
3989			/* wake on frames that pass the receive filters (RFE) */
3990 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
3991 
3992 			buf |= WUCSR_RFE_WAKE_EN_;
3993 			buf |= WUCSR_STORE_WAKE_;
3994 
3995 			ret = lan78xx_write_reg(dev, WUCSR, buf);
3996 
3997 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3998 
3999 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4000 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
4001 
4002 			buf |= PMT_CTL_PHY_WAKE_EN_;
4003 			buf |= PMT_CTL_WOL_EN_;
4004 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
4005 			buf |= PMT_CTL_SUS_MODE_3_;
4006 
4007 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4008 
4009 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4010 
4011 			buf |= PMT_CTL_WUPS_MASK_;
4012 
4013 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4014 
4015 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4016 			buf |= MAC_RX_RXEN_;
4017 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
4018 		} else {
4019 			lan78xx_set_suspend(dev, pdata->wol);
4020 		}
4021 	}
4022 
4023 	ret = 0;
4024 out:
4025 	return ret;
4026 }
4027 
4028 static int lan78xx_resume(struct usb_interface *intf)
4029 {
4030 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4031 	struct sk_buff *skb;
4032 	struct urb *res;
4033 	int ret;
4034 	u32 buf;
4035 
4036 	if (!timer_pending(&dev->stat_monitor)) {
4037 		dev->delta = 1;
4038 		mod_timer(&dev->stat_monitor,
4039 			  jiffies + STAT_UPDATE_TIMER);
4040 	}
4041 
4042 	if (!--dev->suspend_count) {
4043 		/* resume interrupt URBs */
4044		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4045			usb_submit_urb(dev->urb_intr, GFP_NOIO);
4046 
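		/* replay TX URBs deferred while asleep; any that fail to
		 * resubmit are dropped along with their skbs
		 */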
4047 		spin_lock_irq(&dev->txq.lock);
4048 		while ((res = usb_get_from_anchor(&dev->deferred))) {
4049 			skb = (struct sk_buff *)res->context;
4050 			ret = usb_submit_urb(res, GFP_ATOMIC);
4051 			if (ret < 0) {
4052 				dev_kfree_skb_any(skb);
4053 				usb_free_urb(res);
4054 				usb_autopm_put_interface_async(dev->intf);
4055 			} else {
4056 				netif_trans_update(dev->net);
4057 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
4058 			}
4059 		}
4060 
4061 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4062 		spin_unlock_irq(&dev->txq.lock);
4063 
4064 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4065			if (skb_queue_len(&dev->txq) < dev->tx_qlen)
4066 				netif_start_queue(dev->net);
4067 			tasklet_schedule(&dev->bh);
4068 		}
4069 	}
4070 
4071 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4072 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4073 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4074 
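	/* acknowledge any wake events latched while suspended (these
	 * status bits are assumed write-one-to-clear)
	 */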
4075 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4076 					     WUCSR2_ARP_RCD_ |
4077 					     WUCSR2_IPV6_TCPSYN_RCD_ |
4078 					     WUCSR2_IPV4_TCPSYN_RCD_);
4079 
4080 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4081 					    WUCSR_EEE_RX_WAKE_ |
4082 					    WUCSR_PFDA_FR_ |
4083 					    WUCSR_RFE_WAKE_FR_ |
4084 					    WUCSR_WUFR_ |
4085 					    WUCSR_MPR_ |
4086 					    WUCSR_BCST_FR_);
4087 
4088 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4089 	buf |= MAC_TX_TXEN_;
4090 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
4091 
4092 	return 0;
4093 }
4094 
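/* reset_resume: register state was lost across the USB reset, so redo
 * the full chip reset/initialization and restart the PHY before running
 * the normal resume path.
 */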
4095 static int lan78xx_reset_resume(struct usb_interface *intf)
4096 {
4097 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4098 
4099 	lan78xx_reset(dev);
4100 
4101 	phy_start(dev->net->phydev);
4102 
4103 	return lan78xx_resume(intf);
4104 }
4105 
4106 static const struct usb_device_id products[] = {
4107 	{
4108 	/* LAN7800 USB Gigabit Ethernet Device */
4109 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4110 	},
4111 	{
4112 	/* LAN7850 USB Gigabit Ethernet Device */
4113 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4114 	},
4115 	{
4116 	/* LAN7801 USB Gigabit Ethernet Device */
4117 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4118 	},
4119 	{},
4120 };
4121 MODULE_DEVICE_TABLE(usb, products);
4122 
4123 static struct usb_driver lan78xx_driver = {
4124 	.name			= DRIVER_NAME,
4125 	.id_table		= products,
4126 	.probe			= lan78xx_probe,
4127 	.disconnect		= lan78xx_disconnect,
4128 	.suspend		= lan78xx_suspend,
4129 	.resume			= lan78xx_resume,
4130 	.reset_resume		= lan78xx_reset_resume,
4131 	.supports_autosuspend	= 1,
4132 	.disable_hub_initiated_lpm = 1,
4133 };
4134 
4135 module_usb_driver(lan78xx_driver);
4136 
4137 MODULE_AUTHOR(DRIVER_AUTHOR);
4138 MODULE_DESCRIPTION(DRIVER_DESC);
4139 MODULE_LICENSE("GPL");
4140