xref: /linux/drivers/net/usb/lan78xx.c (revision f412eed9dfdeeb6becd7de2ffe8b5d0a8b3f81ca)
/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.6"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistics update interval (msec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Byte Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Byte Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};

/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

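/* Register access notes: reads and writes go through vendor-specific
 * control transfers on endpoint 0. The 4-byte value is kmalloc'd rather
 * than kept on the stack because usb_control_msg() requires a DMA-able
 * buffer, and register contents travel little-endian on the wire.
 */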
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

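/* Fetch the complete hardware statistics block in a single vendor control
 * transfer; each 32-bit counter is byte-swapped from little-endian in
 * place before being copied out to the caller.
 */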
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}

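/* The hardware statistics counters are only 32 bits wide and wrap; a new
 * reading smaller than the saved one is treated as exactly one wrap and
 * bumps the per-counter rollover count consumed by lan78xx_update_stats().
 */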
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

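/* Fold the 32-bit hardware counters into the 64-bit totals as
 * curr_stat = reading + rollover_count * (rollover_max + 1).
 * The stages are walked as flat u32/u64 arrays, which relies on every
 * stage structure declaring the counters in exactly the same order.
 */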
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with a timeout. Called with phy_mutex
 * held.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

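/* Compose an MII_ACC register value from the PHY address, register index
 * and transfer direction; setting MII_ACC_MII_BUSY_ kicks off the
 * transaction, and the same bit is polled for completion above.
 */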
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

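/* Write a block of 32-bit words into one of the internal RAMs (e.g. the
 * VLAN/DA hash tables) through the indirect dataport: select the RAM via
 * DP_SEL, then write address/data/command per word, polling DP_SEL_DPRDY_
 * in between.
 */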
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32	temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

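/* Work item counterpart of lan78xx_set_multicast(): pushes the cached
 * filter state to the chip. MAF_HI is cleared before MAF_LO is written so
 * that a perfect-filter entry is never marked valid while half-updated.
 */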
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

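/* ndo_set_rx_mode callback. It runs in atomic context, so only the cached
 * filter tables are rebuilt here (under rfe_ctl_lock); the USB register
 * writes are deferred to the set_multicast work item scheduled below.
 */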
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else if (dev->net->flags & IFF_ALLMULTI) {
		netif_dbg(dev, drv, dev->net,
			  "receive all multicast enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

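/* Resolve TX/RX pause from the autonegotiation results (or from the
 * manually requested settings) and program the MAC flow-control and FIFO
 * threshold registers; the low 16 bits of FLOW, set to 0xFFFF here, hold
 * the pause time used in transmitted pause frames.
 */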
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

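/* Handle a PHY link change: on link-down the MAC is reset and the stats
 * timer stopped; on link-up the USB U1/U2 low-power states are chosen
 * based on the negotiated speed, and flow control is re-resolved from the
 * advertisement registers.
 */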
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

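/* Interrupt endpoint completion handler: the device reports pending
 * sources as one little-endian 32-bit bitmap, one bit per INT_EP_*
 * definition above. Only the PHY bit is acted upon here.
 */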
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM.
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
	else
		ret = -EINVAL;

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		pdata->wol |= WAKE_UCAST;
	if (wol->wolopts & WAKE_MCAST)
		pdata->wol |= WAKE_MCAST;
	if (wol->wolopts & WAKE_BCAST)
		pdata->wol |= WAKE_BCAST;
	if (wol->wolopts & WAKE_MAGIC)
		pdata->wol |= WAKE_MAGIC;
	if (wol->wolopts & WAKE_PHY)
		pdata->wol |= WAKE_PHY;
	if (wol->wolopts & WAKE_ARP)
		pdata->wol |= WAKE_ARP;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same
		 * microsecond unit.
		 */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	if (!netdev->phydev)
		return (sizeof(lan78xx_regs));
	else
		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}

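/* MAC address selection order: whatever RX_ADDRL/RX_ADDRH already hold
 * (typically loaded by the chip itself from EEPROM/OTP at reset), then
 * platform/device-tree data, then an explicit EEPROM/OTP read, and
 * finally a random address as a last resort.
 */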
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan through PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	if (node) {
		ret = of_mdiobus_register(dev->mdiobus, node);
		of_node_put(node);
	} else {
		ret = mdiobus_register(dev->mdiobus);
	}
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* In forced 100M full/half-duplex mode, the chip may fail to set the
	 * speed correctly when the cable is switched between a long (~50m+)
	 * and a short one. As a workaround, drop to 10M before forcing 100M
	 * again.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear the pending interrupt generated during the workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* re-enable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}

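/* The chip's interrupt-endpoint sources are exposed as a small irq_domain
 * so that phylib can request INT_EP_PHY like any other interrupt line.
 * Mask/unmask only edit the cached INT_EP_CTL value; it is flushed to the
 * device from irq_bus_sync_unlock(), which (together with irq_bus_lock)
 * runs in sleepable context.
 */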
1904 static int irq_map(struct irq_domain *d, unsigned int irq,
1905 		   irq_hw_number_t hwirq)
1906 {
1907 	struct irq_domain_data *data = d->host_data;
1908 
1909 	irq_set_chip_data(irq, data);
1910 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1911 	irq_set_noprobe(irq);
1912 
1913 	return 0;
1914 }
1915 
1916 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1917 {
1918 	irq_set_chip_and_handler(irq, NULL, NULL);
1919 	irq_set_chip_data(irq, NULL);
1920 }
1921 
1922 static const struct irq_domain_ops chip_domain_ops = {
1923 	.map	= irq_map,
1924 	.unmap	= irq_unmap,
1925 };
1926 
1927 static void lan78xx_irq_mask(struct irq_data *irqd)
1928 {
1929 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1930 
1931 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1932 }
1933 
1934 static void lan78xx_irq_unmask(struct irq_data *irqd)
1935 {
1936 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1937 
1938 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
1939 }
1940 
1941 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1942 {
1943 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1944 
1945 	mutex_lock(&data->irq_lock);
1946 }
1947 
1948 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1949 {
1950 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1951 	struct lan78xx_net *dev =
1952 			container_of(data, struct lan78xx_net, domain_data);
1953 	u32 buf;
1954 	int ret;
1955 
	/* Do the register access here: irq_bus_lock and
	 * irq_bus_sync_unlock are the only two callbacks guaranteed to
	 * run in non-atomic context.
	 */
1959 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1960 	if (buf != data->irqenable)
1961 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1962 
1963 	mutex_unlock(&data->irq_lock);
1964 }
1965 
1966 static struct irq_chip lan78xx_irqchip = {
1967 	.name			= "lan78xx-irqs",
1968 	.irq_mask		= lan78xx_irq_mask,
1969 	.irq_unmask		= lan78xx_irq_unmask,
1970 	.irq_bus_lock		= lan78xx_irq_bus_lock,
1971 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
1972 };
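
/* Illustrative flow, not extra driver logic: when phylib calls
 * enable_irq(dev->domain_data.phyirq), the irqchip core runs
 *   lan78xx_irq_bus_lock()        -> mutex_lock(&data->irq_lock)
 *   lan78xx_irq_unmask()          -> data->irqenable |= BIT(hwirq)
 *   lan78xx_irq_bus_sync_unlock() -> INT_EP_CTL written over USB,
 *                                    mutex_unlock(&data->irq_lock)
 * keeping the sleeping USB register access out of atomic context.
 */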
1973 
1974 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1975 {
1976 	struct device_node *of_node;
1977 	struct irq_domain *irqdomain;
1978 	unsigned int irqmap = 0;
1979 	u32 buf;
1980 	int ret = 0;
1981 
1982 	of_node = dev->udev->dev.parent->of_node;
1983 
1984 	mutex_init(&dev->domain_data.irq_lock);
1985 
1986 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1987 	dev->domain_data.irqenable = buf;
1988 
1989 	dev->domain_data.irqchip = &lan78xx_irqchip;
1990 	dev->domain_data.irq_handler = handle_simple_irq;
1991 
1992 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1993 					  &chip_domain_ops, &dev->domain_data);
1994 	if (irqdomain) {
1995 		/* create mapping for PHY interrupt */
1996 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1997 		if (!irqmap) {
1998 			irq_domain_remove(irqdomain);
1999 
2000 			irqdomain = NULL;
2001 			ret = -EINVAL;
2002 		}
2003 	} else {
2004 		ret = -EINVAL;
2005 	}
2006 
2007 	dev->domain_data.irqdomain = irqdomain;
2008 	dev->domain_data.phyirq = irqmap;
2009 
2010 	return ret;
2011 }
2012 
2013 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2014 {
2015 	if (dev->domain_data.phyirq > 0) {
2016 		irq_dispose_mapping(dev->domain_data.phyirq);
2017 
2018 		if (dev->domain_data.irqdomain)
2019 			irq_domain_remove(dev->domain_data.irqdomain);
2020 	}
2021 	dev->domain_data.phyirq = 0;
2022 	dev->domain_data.irqdomain = NULL;
2023 }
2024 
2025 static int lan8835_fixup(struct phy_device *phydev)
2026 {
2027 	int buf;
2028 	int ret;
2029 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2030 
2031 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2032 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2033 	buf &= ~0x1800;
2034 	buf |= 0x0800;
2035 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2036 
2037 	/* RGMII MAC TXC Delay Enable */
2038 	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2039 				MAC_RGMII_ID_TXC_DELAY_EN_);
2040 
2041 	/* RGMII TX DLL Tune Adjust */
2042 	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2043 
2044 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2045 
2046 	return 1;
2047 }
2048 
2049 static int ksz9031rnx_fixup(struct phy_device *phydev)
2050 {
2051 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2052 
	/* Micrel KSZ9031RNX PHY configuration */
2054 	/* RGMII Control Signal Pad Skew */
2055 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2056 	/* RGMII RX Data Pad Skew */
2057 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2058 	/* RGMII RX Clock Pad Skew */
2059 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2060 
2061 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2062 
2063 	return 1;
2064 }
2065 
2066 static int lan78xx_phy_init(struct lan78xx_net *dev)
2067 {
2068 	int ret;
2069 	u32 mii_adv;
2070 	struct phy_device *phydev;
2071 
2072 	phydev = phy_find_first(dev->mdiobus);
2073 	if (!phydev) {
2074 		netdev_err(dev->net, "no PHY found\n");
2075 		return -EIO;
2076 	}
2077 
2078 	if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2079 	    (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2080 		phydev->is_internal = true;
2081 		dev->interface = PHY_INTERFACE_MODE_GMII;
2082 
2083 	} else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2084 		if (!phydev->drv) {
2085 			netdev_err(dev->net, "no PHY driver found\n");
2086 			return -EIO;
2087 		}
2088 
2089 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2090 
2091 		/* external PHY fixup for KSZ9031RNX */
2092 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2093 						 ksz9031rnx_fixup);
2094 		if (ret < 0) {
			netdev_err(dev->net, "failed to register fixup\n");
2096 			return ret;
2097 		}
2098 		/* external PHY fixup for LAN8835 */
2099 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2100 						 lan8835_fixup);
2101 		if (ret < 0) {
			netdev_err(dev->net, "failed to register fixup\n");
2103 			return ret;
2104 		}
2105 		/* add more external PHY fixup here if needed */
2106 
2107 		phydev->is_internal = false;
2108 	} else {
2109 		netdev_err(dev->net, "unknown ID found\n");
2110 		ret = -EIO;
2111 		goto error;
2112 	}
2113 
2114 	/* if phyirq is not set, use polling mode in phylib */
2115 	if (dev->domain_data.phyirq > 0)
2116 		phydev->irq = dev->domain_data.phyirq;
2117 	else
2118 		phydev->irq = 0;
2119 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2120 
2121 	/* set to AUTOMDIX */
2122 	phydev->mdix = ETH_TP_MDI_AUTO;
2123 
2124 	ret = phy_connect_direct(dev->net, phydev,
2125 				 lan78xx_link_status_change,
2126 				 dev->interface);
2127 	if (ret) {
2128 		netdev_err(dev->net, "can't attach PHY to %s\n",
2129 			   dev->mdiobus->id);
2130 		return -EIO;
2131 	}
2132 
2133 	/* MAC doesn't support 1000T Half */
2134 	phydev->supported &= ~SUPPORTED_1000baseT_Half;
2135 
2136 	/* support both flow controls */
2137 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2138 	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2139 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2140 	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2141 
2142 	if (phydev->mdio.dev.of_node) {
2143 		u32 reg;
2144 		int len;
2145 
2146 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2147 						      "microchip,led-modes",
2148 						      sizeof(u32));
2149 		if (len >= 0) {
2150 			/* Ensure the appropriate LEDs are enabled */
2151 			lan78xx_read_reg(dev, HW_CFG, &reg);
2152 			reg &= ~(HW_CFG_LED0_EN_ |
2153 				 HW_CFG_LED1_EN_ |
2154 				 HW_CFG_LED2_EN_ |
2155 				 HW_CFG_LED3_EN_);
2156 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2157 				(len > 1) * HW_CFG_LED1_EN_ |
2158 				(len > 2) * HW_CFG_LED2_EN_ |
2159 				(len > 3) * HW_CFG_LED3_EN_;
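			/* Worked example: a two-entry "microchip,led-modes"
			 * property gives len == 2, enabling LED0 and LED1
			 * and leaving LED2/LED3 off.
			 */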
2160 			lan78xx_write_reg(dev, HW_CFG, reg);
2161 		}
2162 	}
2163 
2164 	genphy_config_aneg(phydev);
2165 
2166 	dev->fc_autoneg = phydev->autoneg;
2167 
2168 	return 0;
2169 
2170 error:
2171 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2172 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2173 
2174 	return ret;
2175 }
2176 
2177 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2178 {
2179 	int ret = 0;
2180 	u32 buf;
2181 	bool rxenabled;
2182 
2183 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2184 
2185 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2186 
2187 	if (rxenabled) {
2188 		buf &= ~MAC_RX_RXEN_;
2189 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2190 	}
2191 
2192 	/* add 4 to size for FCS */
2193 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2194 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2195 
2196 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2197 
2198 	if (rxenabled) {
2199 		buf |= MAC_RX_RXEN_;
2200 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2201 	}
2202 
2203 	return 0;
2204 }
2205 
2206 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2207 {
2208 	struct sk_buff *skb;
2209 	unsigned long flags;
2210 	int count = 0;
2211 
2212 	spin_lock_irqsave(&q->lock, flags);
2213 	while (!skb_queue_empty(q)) {
2214 		struct skb_data	*entry;
2215 		struct urb *urb;
2216 		int ret;
2217 
2218 		skb_queue_walk(q, skb) {
2219 			entry = (struct skb_data *)skb->cb;
2220 			if (entry->state != unlink_start)
2221 				goto found;
2222 		}
2223 		break;
2224 found:
2225 		entry->state = unlink_start;
2226 		urb = entry->urb;
2227 
		/* Take a reference on the URB so that it cannot be
		 * freed while usb_unlink_urb() runs; otherwise we risk
		 * a use-after-free inside usb_unlink_urb(), which
		 * always races with the .complete handler (including
		 * defer_bh).
		 */
2234 		usb_get_urb(urb);
2235 		spin_unlock_irqrestore(&q->lock, flags);
2236 		/* during some PM-driven resume scenarios,
2237 		 * these (async) unlinks complete immediately
2238 		 */
2239 		ret = usb_unlink_urb(urb);
2240 		if (ret != -EINPROGRESS && ret != 0)
2241 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2242 		else
2243 			count++;
2244 		usb_put_urb(urb);
2245 		spin_lock_irqsave(&q->lock, flags);
2246 	}
2247 	spin_unlock_irqrestore(&q->lock, flags);
2248 	return count;
2249 }
2250 
2251 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2252 {
2253 	struct lan78xx_net *dev = netdev_priv(netdev);
2254 	int ll_mtu = new_mtu + netdev->hard_header_len;
2255 	int old_hard_mtu = dev->hard_mtu;
2256 	int old_rx_urb_size = dev->rx_urb_size;
2257 	int ret;
2258 
2259 	/* no second zero-length packet read wanted after mtu-sized packets */
2260 	if ((ll_mtu % dev->maxpacket) == 0)
2261 		return -EDOM;
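	/* For illustration: on a high-speed device (maxpacket 512) with
	 * the 22-byte hard header used here, an MTU of 1514 makes
	 * ll_mtu 1536, a multiple of 512, and is refused above.
	 */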
2262 
2263 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2264 
2265 	netdev->mtu = new_mtu;
2266 
2267 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2268 	if (dev->rx_urb_size == old_hard_mtu) {
2269 		dev->rx_urb_size = dev->hard_mtu;
2270 		if (dev->rx_urb_size > old_rx_urb_size) {
2271 			if (netif_running(dev->net)) {
2272 				unlink_urbs(dev, &dev->rxq);
2273 				tasklet_schedule(&dev->bh);
2274 			}
2275 		}
2276 	}
2277 
2278 	return 0;
2279 }
2280 
2281 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2282 {
2283 	struct lan78xx_net *dev = netdev_priv(netdev);
2284 	struct sockaddr *addr = p;
2285 	u32 addr_lo, addr_hi;
2286 	int ret;
2287 
2288 	if (netif_running(netdev))
2289 		return -EBUSY;
2290 
2291 	if (!is_valid_ether_addr(addr->sa_data))
2292 		return -EADDRNOTAVAIL;
2293 
2294 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2295 
2296 	addr_lo = netdev->dev_addr[0] |
2297 		  netdev->dev_addr[1] << 8 |
2298 		  netdev->dev_addr[2] << 16 |
2299 		  netdev->dev_addr[3] << 24;
2300 	addr_hi = netdev->dev_addr[4] |
2301 		  netdev->dev_addr[5] << 8;
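	/* For illustration, MAC 00:80:0f:12:34:56 packs as
	 * RX_ADDRL = 0x120f8000 and RX_ADDRH = 0x00005634
	 * (byte 0 lands in the low byte of RX_ADDRL).
	 */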
2302 
2303 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2304 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2305 
2306 	return 0;
2307 }
2308 
2309 /* Enable or disable Rx checksum offload engine */
2310 static int lan78xx_set_features(struct net_device *netdev,
2311 				netdev_features_t features)
2312 {
2313 	struct lan78xx_net *dev = netdev_priv(netdev);
2314 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2315 	unsigned long flags;
2316 	int ret;
2317 
2318 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2319 
2320 	if (features & NETIF_F_RXCSUM) {
2321 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2322 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2323 	} else {
2324 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2325 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2326 	}
2327 
2328 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2329 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2330 	else
2331 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2332 
2333 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2334 
2335 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2336 
2337 	return 0;
2338 }
2339 
2340 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2341 {
2342 	struct lan78xx_priv *pdata =
2343 			container_of(param, struct lan78xx_priv, set_vlan);
2344 	struct lan78xx_net *dev = pdata->dev;
2345 
2346 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2347 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2348 }
2349 
2350 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2351 				   __be16 proto, u16 vid)
2352 {
2353 	struct lan78xx_net *dev = netdev_priv(netdev);
2354 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2355 	u16 vid_bit_index;
2356 	u16 vid_dword_index;
2357 
2358 	vid_dword_index = (vid >> 5) & 0x7F;
2359 	vid_bit_index = vid & 0x1F;
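	/* Worked example: vid 100 selects dword 3 (100 >> 5) and bit 4
	 * (100 & 0x1F); the 128 u32 entries cover all 4096 VIDs.
	 */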
2360 
2361 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2362 
2363 	/* defer register writes to a sleepable context */
2364 	schedule_work(&pdata->set_vlan);
2365 
2366 	return 0;
2367 }
2368 
2369 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2370 				    __be16 proto, u16 vid)
2371 {
2372 	struct lan78xx_net *dev = netdev_priv(netdev);
2373 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2374 	u16 vid_bit_index;
2375 	u16 vid_dword_index;
2376 
2377 	vid_dword_index = (vid >> 5) & 0x7F;
2378 	vid_bit_index = vid & 0x1F;
2379 
2380 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2381 
2382 	/* defer register writes to a sleepable context */
2383 	schedule_work(&pdata->set_vlan);
2384 
2385 	return 0;
2386 }
2387 
2388 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2389 {
2390 	int ret;
2391 	u32 buf;
2392 	u32 regs[6] = { 0 };
2393 
2394 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2395 	if (buf & USB_CFG1_LTM_ENABLE_) {
2396 		u8 temp[2];
2397 		/* Get values from EEPROM first */
2398 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2399 			if (temp[0] == 24) {
2400 				ret = lan78xx_read_raw_eeprom(dev,
2401 							      temp[1] * 2,
2402 							      24,
2403 							      (u8 *)regs);
2404 				if (ret < 0)
2405 					return;
2406 			}
2407 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2408 			if (temp[0] == 24) {
2409 				ret = lan78xx_read_raw_otp(dev,
2410 							   temp[1] * 2,
2411 							   24,
2412 							   (u8 *)regs);
2413 				if (ret < 0)
2414 					return;
2415 			}
2416 		}
2417 	}
2418 
2419 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2420 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2421 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2422 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2423 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2424 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2425 }
2426 
2427 static int lan78xx_reset(struct lan78xx_net *dev)
2428 {
2429 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2430 	u32 buf;
2431 	int ret = 0;
2432 	unsigned long timeout;
2433 	u8 sig;
2434 
2435 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2436 	buf |= HW_CFG_LRST_;
2437 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2438 
2439 	timeout = jiffies + HZ;
2440 	do {
2441 		mdelay(1);
2442 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2443 		if (time_after(jiffies, timeout)) {
2444 			netdev_warn(dev->net,
2445 				    "timeout on completion of LiteReset");
2446 			return -EIO;
2447 		}
2448 	} while (buf & HW_CFG_LRST_);
2449 
2450 	lan78xx_init_mac_address(dev);
2451 
2452 	/* save DEVID for later usage */
2453 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2454 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2455 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2456 
2457 	/* Respond to the IN token with a NAK */
2458 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2459 	buf |= USB_CFG_BIR_;
2460 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2461 
2462 	/* Init LTM */
2463 	lan78xx_init_ltm(dev);
2464 
2465 	if (dev->udev->speed == USB_SPEED_SUPER) {
2466 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2467 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2468 		dev->rx_qlen = 4;
2469 		dev->tx_qlen = 4;
2470 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2471 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2472 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2473 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2474 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2475 	} else {
2476 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2477 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2478 		dev->rx_qlen = 4;
2479 		dev->tx_qlen = 4;
2480 	}
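
	/* Illustrative arithmetic: with the 12 KiB default burst cap,
	 * BURST_CAP becomes 12 at SuperSpeed (1024-byte packets), 24 at
	 * high speed (512) and 192 at full speed (64), i.e. the burst
	 * cap expressed in max-sized packets.
	 */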
2481 
2482 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2483 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2484 
2485 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2486 	buf |= HW_CFG_MEF_;
2487 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2488 
2489 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2490 	buf |= USB_CFG_BCE_;
2491 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2492 
2493 	/* set FIFO sizes */
2494 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2495 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2496 
2497 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2498 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2499 
2500 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2501 	ret = lan78xx_write_reg(dev, FLOW, 0);
2502 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2503 
2504 	/* Don't need rfe_ctl_lock during initialisation */
2505 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2506 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2507 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2508 
2509 	/* Enable or disable checksum offload engines */
2510 	lan78xx_set_features(dev->net, dev->net->features);
2511 
2512 	lan78xx_set_multicast(dev->net);
2513 
2514 	/* reset PHY */
2515 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2516 	buf |= PMT_CTL_PHY_RST_;
2517 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2518 
2519 	timeout = jiffies + HZ;
2520 	do {
2521 		mdelay(1);
2522 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2523 		if (time_after(jiffies, timeout)) {
2524 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
2525 			return -EIO;
2526 		}
2527 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2528 
2529 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2530 	/* LAN7801 only has RGMII mode */
2531 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2532 		buf &= ~MAC_CR_GMII_EN_;
2533 
2534 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2535 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2536 		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external EEPROM; enable MAC auto speed/duplex */
2538 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2539 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2540 		}
2541 	}
2542 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2543 
2544 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2545 	buf |= MAC_TX_TXEN_;
2546 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
2547 
2548 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2549 	buf |= FCT_TX_CTL_EN_;
2550 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2551 
2552 	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2553 
2554 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2555 	buf |= MAC_RX_RXEN_;
2556 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2557 
2558 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2559 	buf |= FCT_RX_CTL_EN_;
2560 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2561 
2562 	return 0;
2563 }
2564 
2565 static void lan78xx_init_stats(struct lan78xx_net *dev)
2566 {
2567 	u32 *p;
2568 	int i;
2569 
	/* initialize rollover thresholds for the stats update;
	 * some counters are 20 bits wide and some are 32 bits
	 */
2573 	p = (u32 *)&dev->stats.rollover_max;
2574 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2575 		p[i] = 0xFFFFF;
2576 
2577 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2578 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2579 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2580 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2581 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2582 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2583 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2584 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2585 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2586 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2587 
2588 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2589 }
2590 
2591 static int lan78xx_open(struct net_device *net)
2592 {
2593 	struct lan78xx_net *dev = netdev_priv(net);
2594 	int ret;
2595 
2596 	ret = usb_autopm_get_interface(dev->intf);
2597 	if (ret < 0)
2598 		goto out;
2599 
2600 	phy_start(net->phydev);
2601 
2602 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2603 
2604 	/* for Link Check */
2605 	if (dev->urb_intr) {
2606 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2607 		if (ret < 0) {
2608 			netif_err(dev, ifup, dev->net,
2609 				  "intr submit %d\n", ret);
2610 			goto done;
2611 		}
2612 	}
2613 
2614 	lan78xx_init_stats(dev);
2615 
2616 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2617 
2618 	netif_start_queue(net);
2619 
2620 	dev->link_on = false;
2621 
2622 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2623 done:
2624 	usb_autopm_put_interface(dev->intf);
2625 
2626 out:
2627 	return ret;
2628 }
2629 
2630 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2631 {
2632 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2633 	DECLARE_WAITQUEUE(wait, current);
2634 	int temp;
2635 
2636 	/* ensure there are no more active urbs */
2637 	add_wait_queue(&unlink_wakeup, &wait);
2638 	set_current_state(TASK_UNINTERRUPTIBLE);
2639 	dev->wait = &unlink_wakeup;
2640 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2641 
2642 	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq) ||
	       !skb_queue_empty(&dev->done)) {
2646 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2647 		set_current_state(TASK_UNINTERRUPTIBLE);
2648 		netif_dbg(dev, ifdown, dev->net,
2649 			  "waited for %d urb completions\n", temp);
2650 	}
2651 	set_current_state(TASK_RUNNING);
2652 	dev->wait = NULL;
2653 	remove_wait_queue(&unlink_wakeup, &wait);
2654 }
2655 
2656 static int lan78xx_stop(struct net_device *net)
2657 {
2658 	struct lan78xx_net		*dev = netdev_priv(net);
2659 
2660 	if (timer_pending(&dev->stat_monitor))
2661 		del_timer_sync(&dev->stat_monitor);
2662 
2663 	if (net->phydev)
2664 		phy_stop(net->phydev);
2665 
2666 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2667 	netif_stop_queue(net);
2668 
2669 	netif_info(dev, ifdown, dev->net,
2670 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2671 		   net->stats.rx_packets, net->stats.tx_packets,
2672 		   net->stats.rx_errors, net->stats.tx_errors);
2673 
2674 	lan78xx_terminate_urbs(dev);
2675 
2676 	usb_kill_urb(dev->urb_intr);
2677 
2678 	skb_queue_purge(&dev->rxq_pause);
2679 
2680 	/* deferred work (task, timer, softirq) must also stop.
2681 	 * can't flush_scheduled_work() until we drop rtnl (later),
2682 	 * else workers could deadlock; so make workers a NOP.
2683 	 */
2684 	dev->flags = 0;
2685 	cancel_delayed_work_sync(&dev->wq);
2686 	tasklet_kill(&dev->bh);
2687 
2688 	usb_autopm_put_interface(dev->intf);
2689 
2690 	return 0;
2691 }
2692 
2693 static int lan78xx_linearize(struct sk_buff *skb)
2694 {
2695 	return skb_linearize(skb);
2696 }
2697 
2698 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2699 				       struct sk_buff *skb, gfp_t flags)
2700 {
2701 	u32 tx_cmd_a, tx_cmd_b;
2702 
2703 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2704 		dev_kfree_skb_any(skb);
2705 		return NULL;
2706 	}
2707 
2708 	if (lan78xx_linearize(skb) < 0)
2709 		return NULL;
2710 
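
	/* Each frame handed to the device is prefixed with two
	 * little-endian command words assembled below (field masks per
	 * lan78xx.h):
	 *   | TX_CMD_A (4 bytes) | TX_CMD_B (4 bytes) | frame data ... |
	 * TX_CMD_A carries the frame length, FCS generation and the
	 * checksum-offload/LSO flags; TX_CMD_B carries the TSO MSS and
	 * the VLAN tag to insert, when used.
	 */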
2711 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2712 
2713 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2714 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2715 
2716 	tx_cmd_b = 0;
2717 	if (skb_is_gso(skb)) {
2718 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2719 
2720 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2721 
2722 		tx_cmd_a |= TX_CMD_A_LSO_;
2723 	}
2724 
2725 	if (skb_vlan_tag_present(skb)) {
2726 		tx_cmd_a |= TX_CMD_A_IVTG_;
2727 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2728 	}
2729 
2730 	skb_push(skb, 4);
2731 	cpu_to_le32s(&tx_cmd_b);
2732 	memcpy(skb->data, &tx_cmd_b, 4);
2733 
2734 	skb_push(skb, 4);
2735 	cpu_to_le32s(&tx_cmd_a);
2736 	memcpy(skb->data, &tx_cmd_a, 4);
2737 
2738 	return skb;
2739 }
2740 
2741 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2742 			       struct sk_buff_head *list, enum skb_state state)
2743 {
2744 	unsigned long flags;
2745 	enum skb_state old_state;
2746 	struct skb_data *entry = (struct skb_data *)skb->cb;
2747 
2748 	spin_lock_irqsave(&list->lock, flags);
2749 	old_state = entry->state;
2750 	entry->state = state;
2751 
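	/* Hand-over-hand locking: interrupts stay disabled from the
	 * irqsave above while this queue's lock is swapped for
	 * dev->done.lock; the final irqrestore re-enables them.
	 */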
2752 	__skb_unlink(skb, list);
2753 	spin_unlock(&list->lock);
2754 	spin_lock(&dev->done.lock);
2755 
2756 	__skb_queue_tail(&dev->done, skb);
2757 	if (skb_queue_len(&dev->done) == 1)
2758 		tasklet_schedule(&dev->bh);
2759 	spin_unlock_irqrestore(&dev->done.lock, flags);
2760 
2761 	return old_state;
2762 }
2763 
2764 static void tx_complete(struct urb *urb)
2765 {
2766 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2767 	struct skb_data *entry = (struct skb_data *)skb->cb;
2768 	struct lan78xx_net *dev = entry->dev;
2769 
2770 	if (urb->status == 0) {
2771 		dev->net->stats.tx_packets += entry->num_of_packet;
2772 		dev->net->stats.tx_bytes += entry->length;
2773 	} else {
2774 		dev->net->stats.tx_errors++;
2775 
2776 		switch (urb->status) {
2777 		case -EPIPE:
2778 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2779 			break;
2780 
2781 		/* software-driven interface shutdown */
2782 		case -ECONNRESET:
2783 		case -ESHUTDOWN:
2784 			break;
2785 
2786 		case -EPROTO:
2787 		case -ETIME:
2788 		case -EILSEQ:
2789 			netif_stop_queue(dev->net);
2790 			break;
2791 		default:
2792 			netif_dbg(dev, tx_err, dev->net,
2793 				  "tx err %d\n", entry->urb->status);
2794 			break;
2795 		}
2796 	}
2797 
2798 	usb_autopm_put_interface_async(dev->intf);
2799 
2800 	defer_bh(dev, skb, &dev->txq, tx_done);
2801 }
2802 
2803 static void lan78xx_queue_skb(struct sk_buff_head *list,
2804 			      struct sk_buff *newsk, enum skb_state state)
2805 {
2806 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2807 
2808 	__skb_queue_tail(list, newsk);
2809 	entry->state = state;
2810 }
2811 
2812 static netdev_tx_t
2813 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2814 {
2815 	struct lan78xx_net *dev = netdev_priv(net);
2816 	struct sk_buff *skb2 = NULL;
2817 
2818 	if (skb) {
2819 		skb_tx_timestamp(skb);
2820 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2821 	}
2822 
2823 	if (skb2) {
2824 		skb_queue_tail(&dev->txq_pend, skb2);
2825 
		/* throttle the TX path at speeds slower than SuperSpeed USB */
2827 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2828 		    (skb_queue_len(&dev->txq_pend) > 10))
2829 			netif_stop_queue(net);
2830 	} else {
2831 		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep returned NULL\n");
2833 		dev->net->stats.tx_errors++;
2834 		dev->net->stats.tx_dropped++;
2835 	}
2836 
2837 	tasklet_schedule(&dev->bh);
2838 
2839 	return NETDEV_TX_OK;
2840 }
2841 
2842 static int
2843 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2844 {
2845 	int tmp;
2846 	struct usb_host_interface *alt = NULL;
2847 	struct usb_host_endpoint *in = NULL, *out = NULL;
2848 	struct usb_host_endpoint *status = NULL;
2849 
2850 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2851 		unsigned ep;
2852 
2853 		in = NULL;
2854 		out = NULL;
2855 		status = NULL;
2856 		alt = intf->altsetting + tmp;
2857 
2858 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2859 			struct usb_host_endpoint *e;
2860 			int intr = 0;
2861 
2862 			e = alt->endpoint + ep;
2863 			switch (e->desc.bmAttributes) {
2864 			case USB_ENDPOINT_XFER_INT:
2865 				if (!usb_endpoint_dir_in(&e->desc))
2866 					continue;
2867 				intr = 1;
2868 				/* FALLTHROUGH */
2869 			case USB_ENDPOINT_XFER_BULK:
2870 				break;
2871 			default:
2872 				continue;
2873 			}
2874 			if (usb_endpoint_dir_in(&e->desc)) {
2875 				if (!intr && !in)
2876 					in = e;
2877 				else if (intr && !status)
2878 					status = e;
2879 			} else {
2880 				if (!out)
2881 					out = e;
2882 			}
2883 		}
2884 		if (in && out)
2885 			break;
2886 	}
2887 	if (!alt || !in || !out)
2888 		return -EINVAL;
2889 
2890 	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2891 				       in->desc.bEndpointAddress &
2892 				       USB_ENDPOINT_NUMBER_MASK);
2893 	dev->pipe_out = usb_sndbulkpipe(dev->udev,
2894 					out->desc.bEndpointAddress &
2895 					USB_ENDPOINT_NUMBER_MASK);
2896 	dev->ep_intr = status;
2897 
2898 	return 0;
2899 }
2900 
2901 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2902 {
2903 	struct lan78xx_priv *pdata = NULL;
2904 	int ret;
2905 	int i;
2906 
	ret = lan78xx_get_endpoints(dev, intf);
	if (ret) {
		netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
			    ret);
		return ret;
	}
2908 
2909 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2910 
2911 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2912 	if (!pdata) {
2913 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2914 		return -ENOMEM;
2915 	}
2916 
2917 	pdata->dev = dev;
2918 
2919 	spin_lock_init(&pdata->rfe_ctl_lock);
2920 	mutex_init(&pdata->dataport_mutex);
2921 
2922 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2923 
2924 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2925 		pdata->vlan_table[i] = 0;
2926 
2927 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2928 
2929 	dev->net->features = 0;
2930 
2931 	if (DEFAULT_TX_CSUM_ENABLE)
2932 		dev->net->features |= NETIF_F_HW_CSUM;
2933 
2934 	if (DEFAULT_RX_CSUM_ENABLE)
2935 		dev->net->features |= NETIF_F_RXCSUM;
2936 
2937 	if (DEFAULT_TSO_CSUM_ENABLE)
2938 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2939 
2940 	dev->net->hw_features = dev->net->features;
2941 
2942 	ret = lan78xx_setup_irq_domain(dev);
2943 	if (ret < 0) {
2944 		netdev_warn(dev->net,
2945 			    "lan78xx_setup_irq_domain() failed : %d", ret);
2946 		goto out1;
2947 	}
2948 
2949 	dev->net->hard_header_len += TX_OVERHEAD;
2950 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2951 
2952 	/* Init all registers */
2953 	ret = lan78xx_reset(dev);
2954 	if (ret) {
2955 		netdev_warn(dev->net, "Registers INIT FAILED....");
2956 		goto out2;
2957 	}
2958 
2959 	ret = lan78xx_mdio_init(dev);
2960 	if (ret) {
2961 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
2962 		goto out2;
2963 	}
2964 
2965 	dev->net->flags |= IFF_MULTICAST;
2966 
2967 	pdata->wol = WAKE_MAGIC;
2968 
2969 	return ret;
2970 
2971 out2:
2972 	lan78xx_remove_irq_domain(dev);
2973 
2974 out1:
2975 	netdev_warn(dev->net, "Bind routine FAILED");
2976 	cancel_work_sync(&pdata->set_multicast);
2977 	cancel_work_sync(&pdata->set_vlan);
2978 	kfree(pdata);
2979 	return ret;
2980 }
2981 
2982 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2983 {
2984 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2985 
2986 	lan78xx_remove_irq_domain(dev);
2987 
2988 	lan78xx_remove_mdio(dev);
2989 
2990 	if (pdata) {
2991 		cancel_work_sync(&pdata->set_multicast);
2992 		cancel_work_sync(&pdata->set_vlan);
2993 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2994 		kfree(pdata);
2995 		pdata = NULL;
2996 		dev->data[0] = 0;
2997 	}
2998 }
2999 
3000 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3001 				    struct sk_buff *skb,
3002 				    u32 rx_cmd_a, u32 rx_cmd_b)
3003 {
3004 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3005 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
3006 		skb->ip_summed = CHECKSUM_NONE;
3007 	} else {
3008 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3009 		skb->ip_summed = CHECKSUM_COMPLETE;
3010 	}
3011 }
3012 
3013 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3014 {
3015 	int		status;
3016 
3017 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3018 		skb_queue_tail(&dev->rxq_pause, skb);
3019 		return;
3020 	}
3021 
3022 	dev->net->stats.rx_packets++;
3023 	dev->net->stats.rx_bytes += skb->len;
3024 
3025 	skb->protocol = eth_type_trans(skb, dev->net);
3026 
3027 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3028 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3029 	memset(skb->cb, 0, sizeof(struct skb_data));
3030 
3031 	if (skb_defer_rx_timestamp(skb))
3032 		return;
3033 
3034 	status = netif_rx(skb);
3035 	if (status != NET_RX_SUCCESS)
3036 		netif_dbg(dev, rx_err, dev->net,
3037 			  "netif_rx status %d\n", status);
3038 }
3039 
3040 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3041 {
3042 	if (skb->len < dev->net->hard_header_len)
3043 		return 0;
3044 
3045 	while (skb->len > 0) {
3046 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3047 		u16 rx_cmd_c;
3048 		struct sk_buff *skb2;
3049 		unsigned char *packet;
3050 
3051 		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
3052 		le32_to_cpus(&rx_cmd_a);
3053 		skb_pull(skb, sizeof(rx_cmd_a));
3054 
3055 		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
3056 		le32_to_cpus(&rx_cmd_b);
3057 		skb_pull(skb, sizeof(rx_cmd_b));
3058 
3059 		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
3060 		le16_to_cpus(&rx_cmd_c);
3061 		skb_pull(skb, sizeof(rx_cmd_c));
3062 
3063 		packet = skb->data;
3064 
3065 		/* get the packet length */
3066 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3067 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
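		/* e.g. a 60-byte frame: (4 - ((60 + 2) % 4)) % 4 == 2,
		 * so two pad bytes re-align the next frame header.
		 */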
3068 
3069 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3070 			netif_dbg(dev, rx_err, dev->net,
3071 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3072 		} else {
3073 			/* last frame in this batch */
3074 			if (skb->len == size) {
3075 				lan78xx_rx_csum_offload(dev, skb,
3076 							rx_cmd_a, rx_cmd_b);
3077 
3078 				skb_trim(skb, skb->len - 4); /* remove fcs */
3079 				skb->truesize = size + sizeof(struct sk_buff);
3080 
3081 				return 1;
3082 			}
3083 
3084 			skb2 = skb_clone(skb, GFP_ATOMIC);
3085 			if (unlikely(!skb2)) {
3086 				netdev_warn(dev->net, "Error allocating skb");
3087 				return 0;
3088 			}
3089 
3090 			skb2->len = size;
3091 			skb2->data = packet;
3092 			skb_set_tail_pointer(skb2, size);
3093 
3094 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3095 
3096 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
3097 			skb2->truesize = size + sizeof(struct sk_buff);
3098 
3099 			lan78xx_skb_return(dev, skb2);
3100 		}
3101 
3102 		skb_pull(skb, size);
3103 
3104 		/* padding bytes before the next frame starts */
3105 		if (skb->len)
3106 			skb_pull(skb, align_count);
3107 	}
3108 
3109 	return 1;
3110 }
3111 
3112 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3113 {
3114 	if (!lan78xx_rx(dev, skb)) {
3115 		dev->net->stats.rx_errors++;
3116 		goto done;
3117 	}
3118 
3119 	if (skb->len) {
3120 		lan78xx_skb_return(dev, skb);
3121 		return;
3122 	}
3123 
3124 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3125 	dev->net->stats.rx_errors++;
3126 done:
3127 	skb_queue_tail(&dev->done, skb);
3128 }
3129 
3130 static void rx_complete(struct urb *urb);
3131 
3132 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3133 {
3134 	struct sk_buff *skb;
3135 	struct skb_data *entry;
3136 	unsigned long lockflags;
3137 	size_t size = dev->rx_urb_size;
3138 	int ret = 0;
3139 
3140 	skb = netdev_alloc_skb_ip_align(dev->net, size);
3141 	if (!skb) {
3142 		usb_free_urb(urb);
3143 		return -ENOMEM;
3144 	}
3145 
3146 	entry = (struct skb_data *)skb->cb;
3147 	entry->urb = urb;
3148 	entry->dev = dev;
3149 	entry->length = 0;
3150 
3151 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3152 			  skb->data, size, rx_complete, skb);
3153 
3154 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3155 
3156 	if (netif_device_present(dev->net) &&
3157 	    netif_running(dev->net) &&
3158 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3159 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3160 		ret = usb_submit_urb(urb, GFP_ATOMIC);
3161 		switch (ret) {
3162 		case 0:
3163 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3164 			break;
3165 		case -EPIPE:
3166 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3167 			break;
3168 		case -ENODEV:
3169 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3170 			netif_device_detach(dev->net);
3171 			break;
3172 		case -EHOSTUNREACH:
3173 			ret = -ENOLINK;
3174 			break;
3175 		default:
3176 			netif_dbg(dev, rx_err, dev->net,
3177 				  "rx submit, %d\n", ret);
3178 			tasklet_schedule(&dev->bh);
3179 		}
3180 	} else {
3181 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3182 		ret = -ENOLINK;
3183 	}
3184 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3185 	if (ret) {
3186 		dev_kfree_skb_any(skb);
3187 		usb_free_urb(urb);
3188 	}
3189 	return ret;
3190 }
3191 
3192 static void rx_complete(struct urb *urb)
3193 {
3194 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3195 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3196 	struct lan78xx_net *dev = entry->dev;
3197 	int urb_status = urb->status;
3198 	enum skb_state state;
3199 
3200 	skb_put(skb, urb->actual_length);
3201 	state = rx_done;
3202 	entry->urb = NULL;
3203 
3204 	switch (urb_status) {
3205 	case 0:
3206 		if (skb->len < dev->net->hard_header_len) {
3207 			state = rx_cleanup;
3208 			dev->net->stats.rx_errors++;
3209 			dev->net->stats.rx_length_errors++;
3210 			netif_dbg(dev, rx_err, dev->net,
3211 				  "rx length %d\n", skb->len);
3212 		}
3213 		usb_mark_last_busy(dev->udev);
3214 		break;
3215 	case -EPIPE:
3216 		dev->net->stats.rx_errors++;
3217 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3218 		/* FALLTHROUGH */
3219 	case -ECONNRESET:				/* async unlink */
3220 	case -ESHUTDOWN:				/* hardware gone */
3221 		netif_dbg(dev, ifdown, dev->net,
3222 			  "rx shutdown, code %d\n", urb_status);
3223 		state = rx_cleanup;
3224 		entry->urb = urb;
3225 		urb = NULL;
3226 		break;
3227 	case -EPROTO:
3228 	case -ETIME:
3229 	case -EILSEQ:
3230 		dev->net->stats.rx_errors++;
3231 		state = rx_cleanup;
3232 		entry->urb = urb;
3233 		urb = NULL;
3234 		break;
3235 
3236 	/* data overrun ... flush fifo? */
3237 	case -EOVERFLOW:
3238 		dev->net->stats.rx_over_errors++;
3239 		/* FALLTHROUGH */
3240 
3241 	default:
3242 		state = rx_cleanup;
3243 		dev->net->stats.rx_errors++;
3244 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3245 		break;
3246 	}
3247 
3248 	state = defer_bh(dev, skb, &dev->rxq, state);
3249 
3250 	if (urb) {
3251 		if (netif_running(dev->net) &&
3252 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3253 		    state != unlink_start) {
3254 			rx_submit(dev, urb, GFP_ATOMIC);
3255 			return;
3256 		}
3257 		usb_free_urb(urb);
3258 	}
3259 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3260 }
3261 
3262 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3263 {
3264 	int length;
3265 	struct urb *urb = NULL;
3266 	struct skb_data *entry;
3267 	unsigned long flags;
3268 	struct sk_buff_head *tqp = &dev->txq_pend;
3269 	struct sk_buff *skb, *skb2;
3270 	int ret;
3271 	int count, pos;
3272 	int skb_totallen, pkt_cnt;
3273 
3274 	skb_totallen = 0;
3275 	pkt_cnt = 0;
3276 	count = 0;
3277 	length = 0;
3278 	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3279 		if (skb_is_gso(skb)) {
3280 			if (pkt_cnt) {
3281 				/* handle previous packets first */
3282 				break;
3283 			}
3284 			count = 1;
3285 			length = skb->len - TX_OVERHEAD;
3286 			skb2 = skb_dequeue(tqp);
3287 			goto gso_skb;
3288 		}
3289 
3290 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3291 			break;
3292 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3293 		pkt_cnt++;
3294 	}
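
	/* Illustrative sizing: two queued 68-byte skbs (60-byte frame
	 * plus the 8-byte command header each) batch as
	 * 68 + roundup(68, 4) == 136 bytes in the single URB built
	 * below.
	 */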
3295 
3296 	/* copy to a single skb */
3297 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3298 	if (!skb)
3299 		goto drop;
3300 
3301 	skb_put(skb, skb_totallen);
3302 
3303 	for (count = pos = 0; count < pkt_cnt; count++) {
3304 		skb2 = skb_dequeue(tqp);
3305 		if (skb2) {
3306 			length += (skb2->len - TX_OVERHEAD);
3307 			memcpy(skb->data + pos, skb2->data, skb2->len);
3308 			pos += roundup(skb2->len, sizeof(u32));
3309 			dev_kfree_skb(skb2);
3310 		}
3311 	}
3312 
3313 gso_skb:
3314 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3315 	if (!urb)
3316 		goto drop;
3317 
3318 	entry = (struct skb_data *)skb->cb;
3319 	entry->urb = urb;
3320 	entry->dev = dev;
3321 	entry->length = length;
3322 	entry->num_of_packet = count;
3323 
3324 	spin_lock_irqsave(&dev->txq.lock, flags);
3325 	ret = usb_autopm_get_interface_async(dev->intf);
3326 	if (ret < 0) {
3327 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3328 		goto drop;
3329 	}
3330 
3331 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3332 			  skb->data, skb->len, tx_complete, skb);
3333 
3334 	if (length % dev->maxpacket == 0) {
3335 		/* send USB_ZERO_PACKET */
3336 		urb->transfer_flags |= URB_ZERO_PACKET;
3337 	}
3338 
3339 #ifdef CONFIG_PM
	/* if this triggers, the device is still asleep */
3341 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3342 		/* transmission will be done in resume */
3343 		usb_anchor_urb(urb, &dev->deferred);
3344 		/* no use to process more packets */
3345 		netif_stop_queue(dev->net);
3346 		usb_put_urb(urb);
3347 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3348 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3349 		return;
3350 	}
3351 #endif
3352 
3353 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3354 	switch (ret) {
3355 	case 0:
3356 		netif_trans_update(dev->net);
3357 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3358 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3359 			netif_stop_queue(dev->net);
3360 		break;
3361 	case -EPIPE:
3362 		netif_stop_queue(dev->net);
3363 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3364 		usb_autopm_put_interface_async(dev->intf);
3365 		break;
3366 	default:
3367 		usb_autopm_put_interface_async(dev->intf);
3368 		netif_dbg(dev, tx_err, dev->net,
3369 			  "tx: submit urb err %d\n", ret);
3370 		break;
3371 	}
3372 
3373 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3374 
3375 	if (ret) {
3376 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3377 drop:
3378 		dev->net->stats.tx_dropped++;
3379 		if (skb)
3380 			dev_kfree_skb_any(skb);
3381 		usb_free_urb(urb);
3382 	} else
3383 		netif_dbg(dev, tx_queued, dev->net,
3384 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3385 }
3386 
3387 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3388 {
3389 	struct urb *urb;
3390 	int i;
3391 
3392 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3393 		for (i = 0; i < 10; i++) {
3394 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3395 				break;
3396 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3397 			if (urb)
3398 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3399 					return;
3400 		}
3401 
3402 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3403 			tasklet_schedule(&dev->bh);
3404 	}
3405 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3406 		netif_wake_queue(dev->net);
3407 }
3408 
3409 static void lan78xx_bh(unsigned long param)
3410 {
3411 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
3412 	struct sk_buff *skb;
3413 	struct skb_data *entry;
3414 
3415 	while ((skb = skb_dequeue(&dev->done))) {
3416 		entry = (struct skb_data *)(skb->cb);
3417 		switch (entry->state) {
3418 		case rx_done:
3419 			entry->state = rx_cleanup;
3420 			rx_process(dev, skb);
3421 			continue;
3422 		case tx_done:
3423 			usb_free_urb(entry->urb);
3424 			dev_kfree_skb(skb);
3425 			continue;
3426 		case rx_cleanup:
3427 			usb_free_urb(entry->urb);
3428 			dev_kfree_skb(skb);
3429 			continue;
3430 		default:
3431 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3432 			return;
3433 		}
3434 	}
3435 
3436 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3437 		/* reset update timer delta */
3438 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3439 			dev->delta = 1;
3440 			mod_timer(&dev->stat_monitor,
3441 				  jiffies + STAT_UPDATE_TIMER);
3442 		}
3443 
3444 		if (!skb_queue_empty(&dev->txq_pend))
3445 			lan78xx_tx_bh(dev);
3446 
3447 		if (!timer_pending(&dev->delay) &&
3448 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3449 			lan78xx_rx_bh(dev);
3450 	}
3451 }
3452 
3453 static void lan78xx_delayedwork(struct work_struct *work)
3454 {
3455 	int status;
3456 	struct lan78xx_net *dev;
3457 
3458 	dev = container_of(work, struct lan78xx_net, wq.work);
3459 
3460 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3461 		unlink_urbs(dev, &dev->txq);
3462 		status = usb_autopm_get_interface(dev->intf);
3463 		if (status < 0)
3464 			goto fail_pipe;
3465 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3466 		usb_autopm_put_interface(dev->intf);
3467 		if (status < 0 &&
3468 		    status != -EPIPE &&
3469 		    status != -ESHUTDOWN) {
3470 			if (netif_msg_tx_err(dev))
3471 fail_pipe:
3472 				netdev_err(dev->net,
3473 					   "can't clear tx halt, status %d\n",
3474 					   status);
3475 		} else {
3476 			clear_bit(EVENT_TX_HALT, &dev->flags);
3477 			if (status != -ESHUTDOWN)
3478 				netif_wake_queue(dev->net);
3479 		}
3480 	}
3481 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3482 		unlink_urbs(dev, &dev->rxq);
3483 		status = usb_autopm_get_interface(dev->intf);
3484 		if (status < 0)
			goto fail_halt;
3486 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3487 		usb_autopm_put_interface(dev->intf);
3488 		if (status < 0 &&
3489 		    status != -EPIPE &&
3490 		    status != -ESHUTDOWN) {
3491 			if (netif_msg_rx_err(dev))
3492 fail_halt:
3493 				netdev_err(dev->net,
3494 					   "can't clear rx halt, status %d\n",
3495 					   status);
3496 		} else {
3497 			clear_bit(EVENT_RX_HALT, &dev->flags);
3498 			tasklet_schedule(&dev->bh);
3499 		}
3500 	}
3501 
3502 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3503 		int ret = 0;
3504 
3505 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3506 		status = usb_autopm_get_interface(dev->intf);
3507 		if (status < 0)
3508 			goto skip_reset;
3509 		if (lan78xx_link_reset(dev) < 0) {
3510 			usb_autopm_put_interface(dev->intf);
3511 skip_reset:
3512 			netdev_info(dev->net, "link reset failed (%d)\n",
3513 				    ret);
3514 		} else {
3515 			usb_autopm_put_interface(dev->intf);
3516 		}
3517 	}
3518 
3519 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3520 		lan78xx_update_stats(dev);
3521 
3522 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3523 
3524 		mod_timer(&dev->stat_monitor,
3525 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3526 
3527 		dev->delta = min((dev->delta * 2), 50);
3528 	}
3529 }
3530 
3531 static void intr_complete(struct urb *urb)
3532 {
3533 	struct lan78xx_net *dev = urb->context;
3534 	int status = urb->status;
3535 
3536 	switch (status) {
3537 	/* success */
3538 	case 0:
3539 		lan78xx_status(dev, urb);
3540 		break;
3541 
3542 	/* software-driven interface shutdown */
3543 	case -ENOENT:			/* urb killed */
3544 	case -ESHUTDOWN:		/* hardware gone */
3545 		netif_dbg(dev, ifdown, dev->net,
3546 			  "intr shutdown, code %d\n", status);
3547 		return;
3548 
3549 	/* NOTE:  not throttling like RX/TX, since this endpoint
3550 	 * already polls infrequently
3551 	 */
3552 	default:
3553 		netdev_dbg(dev->net, "intr status %d\n", status);
3554 		break;
3555 	}
3556 
3557 	if (!netif_running(dev->net))
3558 		return;
3559 
3560 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3561 	status = usb_submit_urb(urb, GFP_ATOMIC);
3562 	if (status != 0)
3563 		netif_err(dev, timer, dev->net,
3564 			  "intr resubmit --> %d\n", status);
3565 }
3566 
3567 static void lan78xx_disconnect(struct usb_interface *intf)
3568 {
3569 	struct lan78xx_net		*dev;
3570 	struct usb_device		*udev;
3571 	struct net_device		*net;
3572 
3573 	dev = usb_get_intfdata(intf);
3574 	usb_set_intfdata(intf, NULL);
3575 	if (!dev)
3576 		return;
3577 
3578 	udev = interface_to_usbdev(intf);
3579 	net = dev->net;
3580 
3581 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3582 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3583 
3584 	phy_disconnect(net->phydev);
3585 
3586 	unregister_netdev(net);
3587 
3588 	cancel_delayed_work_sync(&dev->wq);
3589 
3590 	usb_scuttle_anchored_urbs(&dev->deferred);
3591 
3592 	lan78xx_unbind(dev, intf);
3593 
3594 	usb_kill_urb(dev->urb_intr);
3595 	usb_free_urb(dev->urb_intr);
3596 
3597 	free_netdev(net);
3598 	usb_put_dev(udev);
3599 }
3600 
3601 static void lan78xx_tx_timeout(struct net_device *net)
3602 {
3603 	struct lan78xx_net *dev = netdev_priv(net);
3604 
3605 	unlink_urbs(dev, &dev->txq);
3606 	tasklet_schedule(&dev->bh);
3607 }
3608 
3609 static const struct net_device_ops lan78xx_netdev_ops = {
3610 	.ndo_open		= lan78xx_open,
3611 	.ndo_stop		= lan78xx_stop,
3612 	.ndo_start_xmit		= lan78xx_start_xmit,
3613 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3614 	.ndo_change_mtu		= lan78xx_change_mtu,
3615 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3616 	.ndo_validate_addr	= eth_validate_addr,
3617 	.ndo_do_ioctl		= lan78xx_ioctl,
3618 	.ndo_set_rx_mode	= lan78xx_set_multicast,
3619 	.ndo_set_features	= lan78xx_set_features,
3620 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
3621 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
3622 };
3623 
3624 static void lan78xx_stat_monitor(struct timer_list *t)
3625 {
3626 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3627 
3628 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3629 }
3630 
3631 static int lan78xx_probe(struct usb_interface *intf,
3632 			 const struct usb_device_id *id)
3633 {
3634 	struct lan78xx_net *dev;
3635 	struct net_device *netdev;
3636 	struct usb_device *udev;
3637 	int ret;
3638 	unsigned maxp;
3639 	unsigned period;
3640 	u8 *buf = NULL;
3641 
3642 	udev = interface_to_usbdev(intf);
3643 	udev = usb_get_dev(udev);
3644 
3645 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3646 	if (!netdev) {
3647 		dev_err(&intf->dev, "Error: OOM\n");
3648 		ret = -ENOMEM;
3649 		goto out1;
3650 	}
3651 
3652 	/* netdev_printk() needs this */
3653 	SET_NETDEV_DEV(netdev, &intf->dev);
3654 
3655 	dev = netdev_priv(netdev);
3656 	dev->udev = udev;
3657 	dev->intf = intf;
3658 	dev->net = netdev;
3659 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3660 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3661 
3662 	skb_queue_head_init(&dev->rxq);
3663 	skb_queue_head_init(&dev->txq);
3664 	skb_queue_head_init(&dev->done);
3665 	skb_queue_head_init(&dev->rxq_pause);
3666 	skb_queue_head_init(&dev->txq_pend);
3667 	mutex_init(&dev->phy_mutex);
3668 
3669 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3670 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3671 	init_usb_anchor(&dev->deferred);
3672 
3673 	netdev->netdev_ops = &lan78xx_netdev_ops;
3674 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3675 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3676 
3677 	dev->delta = 1;
3678 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3679 
3680 	mutex_init(&dev->stats.access_lock);
3681 
3682 	ret = lan78xx_bind(dev, intf);
3683 	if (ret < 0)
3684 		goto out2;
3685 	strcpy(netdev->name, "eth%d");
3686 
3687 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3688 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3689 
3690 	/* MTU range: 68 - 9000 */
3691 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3692 
3693 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3694 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3695 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3696 
3697 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3698 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3699 
3700 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3701 					dev->ep_intr->desc.bEndpointAddress &
3702 					USB_ENDPOINT_NUMBER_MASK);
3703 	period = dev->ep_intr->desc.bInterval;
3704 
3705 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3706 	buf = kmalloc(maxp, GFP_KERNEL);
3707 	if (buf) {
3708 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3709 		if (!dev->urb_intr) {
3710 			ret = -ENOMEM;
3711 			kfree(buf);
3712 			goto out3;
3713 		} else {
3714 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3715 					 dev->pipe_intr, buf, maxp,
3716 					 intr_complete, dev, period);
3717 		}
3718 	}
3719 
3720 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3721 
3722 	/* driver requires remote-wakeup capability during autosuspend. */
3723 	intf->needs_remote_wakeup = 1;
3724 
3725 	ret = register_netdev(netdev);
3726 	if (ret != 0) {
3727 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3728 		goto out3;
3729 	}
3730 
3731 	usb_set_intfdata(intf, dev);
3732 
3733 	ret = device_set_wakeup_enable(&udev->dev, true);
3734 
	/* The default autosuspend delay of 2 seconds has more overhead
	 * than benefit; use 10 seconds instead.
	 */
3738 	pm_runtime_set_autosuspend_delay(&udev->dev,
3739 					 DEFAULT_AUTOSUSPEND_DELAY);
3740 
3741 	ret = lan78xx_phy_init(dev);
3742 	if (ret < 0)
3743 		goto out4;
3744 
3745 	return 0;
3746 
3747 out4:
3748 	unregister_netdev(netdev);
3749 out3:
3750 	lan78xx_unbind(dev, intf);
3751 out2:
3752 	free_netdev(netdev);
3753 out1:
3754 	usb_put_dev(udev);
3755 
3756 	return ret;
3757 }
3758 
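/* Bit-serial CRC-16 over the given bytes: LSB-first data, polynomial
 * 0x8005 (x^16 + x^15 + x^2 + 1), initial value 0xFFFF.  The result is
 * programmed into the WUF_CFGX_CRC16 field so the MAC can match wakeup
 * frames in hardware; for illustration, lan78xx_set_suspend() below
 * feeds it the 01:00:5E IPv4 multicast prefix and the ARP EtherType
 * 0x0806.
 */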
3759 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3760 {
3761 	const u16 crc16poly = 0x8005;
3762 	int i;
3763 	u16 bit, crc, msb;
3764 	u8 data;
3765 
3766 	crc = 0xFFFF;
3767 	for (i = 0; i < len; i++) {
3768 		data = *buf++;
3769 		for (bit = 0; bit < 8; bit++) {
3770 			msb = crc >> 15;
3771 			crc <<= 1;
3772 
3773 			if (msb ^ (u16)(data & 1)) {
3774 				crc ^= crc16poly;
3775 				crc |= (u16)0x0001U;
3776 			}
3777 			data >>= 1;
3778 		}
3779 	}
3780 
3781 	return crc;
3782 }
3783 
3784 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3785 {
3786 	u32 buf;
3787 	int ret;
3788 	int mask_index;
3789 	u16 crc;
3790 	u32 temp_wucsr;
3791 	u32 temp_pmt_ctl;
3792 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3793 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3794 	const u8 arp_type[2] = { 0x08, 0x06 };
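
	/* These byte prefixes seed the wakeup-filter CRCs configured
	 * below: 01:00:5E is the IPv4 multicast OUI, 33:33 the IPv6
	 * multicast prefix and 08:06 the ARP EtherType found at frame
	 * offset 12.  The WUF_MASKn words select which bytes feed the
	 * CRC, e.g. mask 7 covers bytes 0-2 and mask 0x3000 bytes 12-13.
	 */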
3795 
3796 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3797 	buf &= ~MAC_TX_TXEN_;
3798 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3799 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3800 	buf &= ~MAC_RX_RXEN_;
3801 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3802 
3803 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3804 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3805 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3806 
3807 	temp_wucsr = 0;
3808 
3809 	temp_pmt_ctl = 0;
3810 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3811 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3812 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3813 
3814 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3815 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3816 
3817 	mask_index = 0;
3818 	if (wol & WAKE_PHY) {
3819 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3820 
3821 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3822 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3823 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3824 	}
3825 	if (wol & WAKE_MAGIC) {
3826 		temp_wucsr |= WUCSR_MPEN_;
3827 
3828 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3829 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3830 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3831 	}
3832 	if (wol & WAKE_BCAST) {
3833 		temp_wucsr |= WUCSR_BCST_EN_;
3834 
3835 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3836 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3837 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3838 	}
3839 	if (wol & WAKE_MCAST) {
3840 		temp_wucsr |= WUCSR_WAKE_EN_;
3841 
3842 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3843 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3844 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3845 					WUF_CFGX_EN_ |
3846 					WUF_CFGX_TYPE_MCAST_ |
3847 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3848 					(crc & WUF_CFGX_CRC16_MASK_));
3849 
3850 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3851 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3852 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3853 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3854 		mask_index++;
3855 
3856 		/* for IPv6 Multicast */
3857 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3858 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3859 					WUF_CFGX_EN_ |
3860 					WUF_CFGX_TYPE_MCAST_ |
3861 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3862 					(crc & WUF_CFGX_CRC16_MASK_));
3863 
3864 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3865 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3866 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3867 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3868 		mask_index++;
3869 
3870 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3871 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3872 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3873 	}
3874 	if (wol & WAKE_UCAST) {
3875 		temp_wucsr |= WUCSR_PFDA_EN_;
3876 
3877 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3878 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3879 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3880 	}
3881 	if (wol & WAKE_ARP) {
3882 		temp_wucsr |= WUCSR_WAKE_EN_;
3883 
3884 		/* set WUF_CFG & WUF_MASK
3885 		 * for packettype (offset 12,13) = ARP (0x0806)
3886 		 */
3887 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3888 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3889 					WUF_CFGX_EN_ |
3890 					WUF_CFGX_TYPE_ALL_ |
3891 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3892 					(crc & WUF_CFGX_CRC16_MASK_));
3893 
3894 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3895 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3896 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3897 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3898 		mask_index++;
3899 
3900 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3901 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3902 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3903 	}
3904 
3905 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3906 
3907 	/* when multiple WOL bits are set, use the lightest suspend mode
	 * so that every requested wake source stays armed
	 */
3908 	if (hweight_long((unsigned long)wol) > 1) {
3909 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3910 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3911 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3912 	}
3913 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3914 
3915 	/* clear WUPS */
3916 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3917 	buf |= PMT_CTL_WUPS_MASK_;
3918 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3919 
3920 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3921 	buf |= MAC_RX_RXEN_;
3922 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3923 
3924 	return 0;
3925 }
3926 
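/* USB suspend handler.  On the first (outermost) suspend the MAC is
 * stopped and all URBs are killed; then either good-frame wakeup is
 * armed (runtime autosuspend) or the user's WoL settings are applied
 * (system suspend).  suspend_count balances nested suspend/resume
 * calls from the USB core.
 */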
3927 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3928 {
3929 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3930 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3931 	u32 buf;
3932 	int ret;
3936 
3937 	if (!dev->suspend_count++) {
3938 		spin_lock_irq(&dev->txq.lock);
3939 		/* don't autosuspend while transmitting */
3940 		if ((skb_queue_len(&dev->txq) ||
3941 		     skb_queue_len(&dev->txq_pend)) &&
3942 		    PMSG_IS_AUTO(message)) {
3943 			spin_unlock_irq(&dev->txq.lock);
3944 			ret = -EBUSY;
3945 			goto out;
3946 		} else {
3947 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3948 			spin_unlock_irq(&dev->txq.lock);
3949 		}
3950 
3951 		/* stop TX & RX */
3952 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3953 		buf &= ~MAC_TX_TXEN_;
3954 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
3955 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3956 		buf &= ~MAC_RX_RXEN_;
3957 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
3958 
3959 		/* empty out the Rx and Tx queues */
3960 		netif_device_detach(dev->net);
3961 		lan78xx_terminate_urbs(dev);
3962 		usb_kill_urb(dev->urb_intr);
3963 
3964 		/* reattach */
3965 		netif_device_attach(dev->net);
3966 	}
3967 
3968 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3969 		del_timer(&dev->stat_monitor);
3970 
3971 		if (PMSG_IS_AUTO(message)) {
3972 			/* auto suspend (selective suspend) */
3973 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3974 			buf &= ~MAC_TX_TXEN_;
3975 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
3976 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3977 			buf &= ~MAC_RX_RXEN_;
3978 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3979 
3980 			ret = lan78xx_write_reg(dev, WUCSR, 0);
3981 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
3982 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3983 
3984 			/* set goodframe wakeup */
3985 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
3986 
3987 			buf |= WUCSR_RFE_WAKE_EN_;
3988 			buf |= WUCSR_STORE_WAKE_;
3989 
3990 			ret = lan78xx_write_reg(dev, WUCSR, buf);
3991 
3992 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3993 
3994 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3995 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
3996 
3997 			buf |= PMT_CTL_PHY_WAKE_EN_;
3998 			buf |= PMT_CTL_WOL_EN_;
3999 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
4000 			buf |= PMT_CTL_SUS_MODE_3_;
4001 
4002 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4003 
4004 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4005 
4006 			buf |= PMT_CTL_WUPS_MASK_;
4007 
4008 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4009 
4010 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4011 			buf |= MAC_RX_RXEN_;
4012 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
4013 		} else {
4014 			lan78xx_set_suspend(dev, pdata->wol);
4015 		}
4016 	}
4017 
4018 	ret = 0;
4019 out:
4020 	return ret;
4021 }
4022 
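/* USB resume handler.  Restarts the statistics timer, resubmits the
 * interrupt URB and any transmit URBs deferred while asleep, clears
 * the wakeup state armed at suspend time and re-enables the
 * transmitter.
 */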
4023 static int lan78xx_resume(struct usb_interface *intf)
4024 {
4025 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4026 	struct sk_buff *skb;
4027 	struct urb *res;
4028 	int ret;
4029 	u32 buf;
4030 
4031 	if (!timer_pending(&dev->stat_monitor)) {
4032 		dev->delta = 1;
4033 		mod_timer(&dev->stat_monitor,
4034 			  jiffies + STAT_UPDATE_TIMER);
4035 	}
4036 
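	/* only the outermost resume restarts the URBs and queues */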
4037 	if (!--dev->suspend_count) {
4038 		/* resume interrupt URBs */
4039 		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4040 			usb_submit_urb(dev->urb_intr, GFP_NOIO);
4041 
4042 		spin_lock_irq(&dev->txq.lock);
4043 		while ((res = usb_get_from_anchor(&dev->deferred))) {
4044 			skb = (struct sk_buff *)res->context;
4045 			ret = usb_submit_urb(res, GFP_ATOMIC);
4046 			if (ret < 0) {
4047 				dev_kfree_skb_any(skb);
4048 				usb_free_urb(res);
4049 				usb_autopm_put_interface_async(dev->intf);
4050 			} else {
4051 				netif_trans_update(dev->net);
4052 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
4053 			}
4054 		}
4055 
4056 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4057 		spin_unlock_irq(&dev->txq.lock);
4058 
4059 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4060 			if (skb_queue_len(&dev->txq) < dev->tx_qlen)
4061 				netif_start_queue(dev->net);
4062 			tasklet_schedule(&dev->bh);
4063 		}
4064 	}
4065 
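	/* disarm the wakeup sources and acknowledge (write-one-to-clear)
	 * any wake events latched while the device was suspended
	 */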
4066 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4067 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4068 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4069 
4070 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4071 					     WUCSR2_ARP_RCD_ |
4072 					     WUCSR2_IPV6_TCPSYN_RCD_ |
4073 					     WUCSR2_IPV4_TCPSYN_RCD_);
4074 
4075 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4076 					    WUCSR_EEE_RX_WAKE_ |
4077 					    WUCSR_PFDA_FR_ |
4078 					    WUCSR_RFE_WAKE_FR_ |
4079 					    WUCSR_WUFR_ |
4080 					    WUCSR_MPR_ |
4081 					    WUCSR_BCST_FR_);
4082 
4083 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4084 	buf |= MAC_TX_TXEN_;
4085 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
4086 
4087 	return 0;
4088 }
4089 
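/* Reset-resume handler: the device was reset while suspended and has
 * lost its register state, so re-run the full chip reset and restart
 * the PHY before going through the normal resume path.
 */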
4090 static int lan78xx_reset_resume(struct usb_interface *intf)
4091 {
4092 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4093 
4094 	lan78xx_reset(dev);
4095 
4096 	phy_start(dev->net->phydev);
4097 
4098 	return lan78xx_resume(intf);
4099 }
4100 
4101 static const struct usb_device_id products[] = {
4102 	{
4103 	/* LAN7800 USB Gigabit Ethernet Device */
4104 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4105 	},
4106 	{
4107 	/* LAN7850 USB Gigabit Ethernet Device */
4108 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4109 	},
4110 	{
4111 	/* LAN7801 USB Gigabit Ethernet Device */
4112 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4113 	},
4114 	{},
4115 };
4116 MODULE_DEVICE_TABLE(usb, products);
4117 
4118 static struct usb_driver lan78xx_driver = {
4119 	.name			= DRIVER_NAME,
4120 	.id_table		= products,
4121 	.probe			= lan78xx_probe,
4122 	.disconnect		= lan78xx_disconnect,
4123 	.suspend		= lan78xx_suspend,
4124 	.resume			= lan78xx_resume,
4125 	.reset_resume		= lan78xx_reset_resume,
4126 	.supports_autosuspend	= 1,
4127 	.disable_hub_initiated_lpm = 1,
4128 };
4129 
4130 module_usb_driver(lan78xx_driver);
4131 
4132 MODULE_AUTHOR(DRIVER_AUTHOR);
4133 MODULE_DESCRIPTION(DRIVER_DESC);
4134 MODULE_LICENSE("GPL");
4135