1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
34 #include "lan78xx.h"
35 
36 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME	"lan78xx"
39 #define DRIVER_VERSION	"1.0.4"
40 
41 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
42 #define THROTTLE_JIFFIES		(HZ / 8)
43 #define UNLINK_TIMEOUT_MS		3
44 
45 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
46 
47 #define SS_USB_PKT_SIZE			(1024)
48 #define HS_USB_PKT_SIZE			(512)
49 #define FS_USB_PKT_SIZE			(64)
50 
51 #define MAX_RX_FIFO_SIZE		(12 * 1024)
52 #define MAX_TX_FIFO_SIZE		(12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY		(0x0800)
55 #define MAX_SINGLE_PACKET_SIZE		(9000)
56 #define DEFAULT_TX_CSUM_ENABLE		(true)
57 #define DEFAULT_RX_CSUM_ENABLE		(true)
58 #define DEFAULT_TSO_CSUM_ENABLE		(true)
59 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
60 #define TX_OVERHEAD			(8)
61 #define RXW_PADDING			2
62 
63 #define LAN78XX_USB_VENDOR_ID		(0x0424)
64 #define LAN7800_USB_PRODUCT_ID		(0x7800)
65 #define LAN7850_USB_PRODUCT_ID		(0x7850)
66 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
67 #define LAN78XX_OTP_MAGIC		(0x78F3)
68 
69 #define	MII_READ			1
70 #define	MII_WRITE			0
71 
72 #define EEPROM_INDICATOR		(0xA5)
73 #define EEPROM_MAC_OFFSET		(0x01)
74 #define MAX_EEPROM_SIZE			512
75 #define OTP_INDICATOR_1			(0xF3)
76 #define OTP_INDICATOR_2			(0xF7)
77 
78 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
79 					 WAKE_MCAST | WAKE_BCAST | \
80 					 WAKE_ARP | WAKE_MAGIC)
81 
82 /* USB related defines */
83 #define BULK_IN_PIPE			1
84 #define BULK_OUT_PIPE			2
85 
86 /* default autosuspend delay (msec) */
87 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
88 
89 /* statistics update interval (msec) */
90 #define STAT_UPDATE_TIMER		(1 * 1000)
91 
92 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
93 	"RX FCS Errors",
94 	"RX Alignment Errors",
95 	"Rx Fragment Errors",
96 	"RX Jabber Errors",
97 	"RX Undersize Frame Errors",
98 	"RX Oversize Frame Errors",
99 	"RX Dropped Frames",
100 	"RX Unicast Byte Count",
101 	"RX Broadcast Byte Count",
102 	"RX Multicast Byte Count",
103 	"RX Unicast Frames",
104 	"RX Broadcast Frames",
105 	"RX Multicast Frames",
106 	"RX Pause Frames",
107 	"RX 64 Byte Frames",
108 	"RX 65 - 127 Byte Frames",
109 	"RX 128 - 255 Byte Frames",
110 	"RX 256 - 511 Bytes Frames",
111 	"RX 512 - 1023 Byte Frames",
112 	"RX 1024 - 1518 Byte Frames",
113 	"RX Greater 1518 Byte Frames",
114 	"EEE RX LPI Transitions",
115 	"EEE RX LPI Time",
116 	"TX FCS Errors",
117 	"TX Excess Deferral Errors",
118 	"TX Carrier Errors",
119 	"TX Bad Byte Count",
120 	"TX Single Collisions",
121 	"TX Multiple Collisions",
122 	"TX Excessive Collision",
123 	"TX Late Collisions",
124 	"TX Unicast Byte Count",
125 	"TX Broadcast Byte Count",
126 	"TX Multicast Byte Count",
127 	"TX Unicast Frames",
128 	"TX Broadcast Frames",
129 	"TX Multicast Frames",
130 	"TX Pause Frames",
131 	"TX 64 Byte Frames",
132 	"TX 65 - 127 Byte Frames",
133 	"TX 128 - 255 Byte Frames",
134 	"TX 256 - 511 Bytes Frames",
135 	"TX 512 - 1023 Byte Frames",
136 	"TX 1024 - 1518 Byte Frames",
137 	"TX Greater 1518 Byte Frames",
138 	"EEE TX LPI Transitions",
139 	"EEE TX LPI Time",
140 };
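
/* Each string above corresponds positionally to a member of
 * struct lan78xx_statstage / lan78xx_statstage64 below;
 * lan78xx_get_stats() copies curr_stat straight into the ethtool data
 * array, so the string list and both structs must stay in the same order.
 */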
141 
142 struct lan78xx_statstage {
143 	u32 rx_fcs_errors;
144 	u32 rx_alignment_errors;
145 	u32 rx_fragment_errors;
146 	u32 rx_jabber_errors;
147 	u32 rx_undersize_frame_errors;
148 	u32 rx_oversize_frame_errors;
149 	u32 rx_dropped_frames;
150 	u32 rx_unicast_byte_count;
151 	u32 rx_broadcast_byte_count;
152 	u32 rx_multicast_byte_count;
153 	u32 rx_unicast_frames;
154 	u32 rx_broadcast_frames;
155 	u32 rx_multicast_frames;
156 	u32 rx_pause_frames;
157 	u32 rx_64_byte_frames;
158 	u32 rx_65_127_byte_frames;
159 	u32 rx_128_255_byte_frames;
160 	u32 rx_256_511_bytes_frames;
161 	u32 rx_512_1023_byte_frames;
162 	u32 rx_1024_1518_byte_frames;
163 	u32 rx_greater_1518_byte_frames;
164 	u32 eee_rx_lpi_transitions;
165 	u32 eee_rx_lpi_time;
166 	u32 tx_fcs_errors;
167 	u32 tx_excess_deferral_errors;
168 	u32 tx_carrier_errors;
169 	u32 tx_bad_byte_count;
170 	u32 tx_single_collisions;
171 	u32 tx_multiple_collisions;
172 	u32 tx_excessive_collision;
173 	u32 tx_late_collisions;
174 	u32 tx_unicast_byte_count;
175 	u32 tx_broadcast_byte_count;
176 	u32 tx_multicast_byte_count;
177 	u32 tx_unicast_frames;
178 	u32 tx_broadcast_frames;
179 	u32 tx_multicast_frames;
180 	u32 tx_pause_frames;
181 	u32 tx_64_byte_frames;
182 	u32 tx_65_127_byte_frames;
183 	u32 tx_128_255_byte_frames;
184 	u32 tx_256_511_bytes_frames;
185 	u32 tx_512_1023_byte_frames;
186 	u32 tx_1024_1518_byte_frames;
187 	u32 tx_greater_1518_byte_frames;
188 	u32 eee_tx_lpi_transitions;
189 	u32 eee_tx_lpi_time;
190 };
191 
192 struct lan78xx_statstage64 {
193 	u64 rx_fcs_errors;
194 	u64 rx_alignment_errors;
195 	u64 rx_fragment_errors;
196 	u64 rx_jabber_errors;
197 	u64 rx_undersize_frame_errors;
198 	u64 rx_oversize_frame_errors;
199 	u64 rx_dropped_frames;
200 	u64 rx_unicast_byte_count;
201 	u64 rx_broadcast_byte_count;
202 	u64 rx_multicast_byte_count;
203 	u64 rx_unicast_frames;
204 	u64 rx_broadcast_frames;
205 	u64 rx_multicast_frames;
206 	u64 rx_pause_frames;
207 	u64 rx_64_byte_frames;
208 	u64 rx_65_127_byte_frames;
209 	u64 rx_128_255_byte_frames;
210 	u64 rx_256_511_bytes_frames;
211 	u64 rx_512_1023_byte_frames;
212 	u64 rx_1024_1518_byte_frames;
213 	u64 rx_greater_1518_byte_frames;
214 	u64 eee_rx_lpi_transitions;
215 	u64 eee_rx_lpi_time;
216 	u64 tx_fcs_errors;
217 	u64 tx_excess_deferral_errors;
218 	u64 tx_carrier_errors;
219 	u64 tx_bad_byte_count;
220 	u64 tx_single_collisions;
221 	u64 tx_multiple_collisions;
222 	u64 tx_excessive_collision;
223 	u64 tx_late_collisions;
224 	u64 tx_unicast_byte_count;
225 	u64 tx_broadcast_byte_count;
226 	u64 tx_multicast_byte_count;
227 	u64 tx_unicast_frames;
228 	u64 tx_broadcast_frames;
229 	u64 tx_multicast_frames;
230 	u64 tx_pause_frames;
231 	u64 tx_64_byte_frames;
232 	u64 tx_65_127_byte_frames;
233 	u64 tx_128_255_byte_frames;
234 	u64 tx_256_511_bytes_frames;
235 	u64 tx_512_1023_byte_frames;
236 	u64 tx_1024_1518_byte_frames;
237 	u64 tx_greater_1518_byte_frames;
238 	u64 eee_tx_lpi_transitions;
239 	u64 eee_tx_lpi_time;
240 };
241 
242 struct lan78xx_net;
243 
244 struct lan78xx_priv {
245 	struct lan78xx_net *dev;
246 	u32 rfe_ctl;
247 	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
248 	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
249 	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
250 	struct mutex dataport_mutex; /* for dataport access */
251 	spinlock_t rfe_ctl_lock; /* for rfe register access */
252 	struct work_struct set_multicast;
253 	struct work_struct set_vlan;
254 	u32 wol;
255 };
256 
257 enum skb_state {
258 	illegal = 0,
259 	tx_start,
260 	tx_done,
261 	rx_start,
262 	rx_done,
263 	rx_cleanup,
264 	unlink_start
265 };
266 
267 struct skb_data {		/* skb->cb is one of these */
268 	struct urb *urb;
269 	struct lan78xx_net *dev;
270 	enum skb_state state;
271 	size_t length;
272 	int num_of_packet;
273 };
274 
275 struct usb_context {
276 	struct usb_ctrlrequest req;
277 	struct lan78xx_net *dev;
278 };
279 
280 #define EVENT_TX_HALT			0
281 #define EVENT_RX_HALT			1
282 #define EVENT_RX_MEMORY			2
283 #define EVENT_STS_SPLIT			3
284 #define EVENT_LINK_RESET		4
285 #define EVENT_RX_PAUSED			5
286 #define EVENT_DEV_WAKING		6
287 #define EVENT_DEV_ASLEEP		7
288 #define EVENT_DEV_OPEN			8
289 #define EVENT_STAT_UPDATE		9
290 
291 struct statstage {
292 	struct mutex			access_lock;	/* for stats access */
293 	struct lan78xx_statstage	saved;
294 	struct lan78xx_statstage	rollover_count;
295 	struct lan78xx_statstage	rollover_max;
296 	struct lan78xx_statstage64	curr_stat;
297 };
298 
299 struct lan78xx_net {
300 	struct net_device	*net;
301 	struct usb_device	*udev;
302 	struct usb_interface	*intf;
303 	void			*driver_priv;
304 
305 	int			rx_qlen;
306 	int			tx_qlen;
307 	struct sk_buff_head	rxq;
308 	struct sk_buff_head	txq;
309 	struct sk_buff_head	done;
310 	struct sk_buff_head	rxq_pause;
311 	struct sk_buff_head	txq_pend;
312 
313 	struct tasklet_struct	bh;
314 	struct delayed_work	wq;
315 
316 	struct usb_host_endpoint *ep_blkin;
317 	struct usb_host_endpoint *ep_blkout;
318 	struct usb_host_endpoint *ep_intr;
319 
320 	int			msg_enable;
321 
322 	struct urb		*urb_intr;
323 	struct usb_anchor	deferred;
324 
325 	struct mutex		phy_mutex; /* for phy access */
326 	unsigned		pipe_in, pipe_out, pipe_intr;
327 
328 	u32			hard_mtu;	/* count any extra framing */
329 	size_t			rx_urb_size;	/* size for rx urbs */
330 
331 	unsigned long		flags;
332 
333 	wait_queue_head_t	*wait;
334 	unsigned char		suspend_count;
335 
336 	unsigned		maxpacket;
337 	struct timer_list	delay;
338 	struct timer_list	stat_monitor;
339 
340 	unsigned long		data[5];
341 
342 	int			link_on;
343 	u8			mdix_ctrl;
344 
345 	u32			chipid;
346 	u32			chiprev;
347 	struct mii_bus		*mdiobus;
348 
349 	int			fc_autoneg;
350 	u8			fc_request_control;
351 
352 	int			delta;
353 	struct statstage	stats;
354 };
355 
356 /* use ethtool to change the level for any given device */
357 static int msg_level = -1;
358 module_param(msg_level, int, 0);
359 MODULE_PARM_DESC(msg_level, "Override default message level");
360 
361 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
362 {
363 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
364 	int ret;
365 
366 	if (!buf)
367 		return -ENOMEM;
368 
369 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
370 			      USB_VENDOR_REQUEST_READ_REGISTER,
371 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
372 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
373 	if (likely(ret >= 0)) {
374 		le32_to_cpus(buf);
375 		*data = *buf;
376 	} else {
377 		netdev_warn(dev->net,
378 			    "Failed to read register index 0x%08x. ret = %d",
379 			    index, ret);
380 	}
381 
382 	kfree(buf);
383 
384 	return ret;
385 }
386 
387 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
388 {
389 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
390 	int ret;
391 
392 	if (!buf)
393 		return -ENOMEM;
394 
395 	*buf = data;
396 	cpu_to_le32s(buf);
397 
398 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
399 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
400 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
401 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
402 	if (unlikely(ret < 0)) {
403 		netdev_warn(dev->net,
404 			    "Failed to write register index 0x%08x. ret = %d",
405 			    index, ret);
406 	}
407 
408 	kfree(buf);
409 
410 	return ret;
411 }
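
/* Illustrative read-modify-write sketch using the two helpers above (not
 * part of the original flow). Note that on success both helpers return
 * the usb_control_msg() byte count (4), not 0, so callers must test for
 * "ret < 0" rather than "ret != 0":
 *
 *	u32 buf;
 *
 *	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
 *	if (ret < 0)
 *		return ret;
 *	buf |= MAC_CR_RST_;
 *	ret = lan78xx_write_reg(dev, MAC_CR, buf);
 */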
412 
413 static int lan78xx_read_stats(struct lan78xx_net *dev,
414 			      struct lan78xx_statstage *data)
415 {
416 	int ret = 0;
417 	int i;
418 	struct lan78xx_statstage *stats;
419 	u32 *src;
420 	u32 *dst;
421 
422 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
423 	if (!stats)
424 		return -ENOMEM;
425 
426 	ret = usb_control_msg(dev->udev,
427 			      usb_rcvctrlpipe(dev->udev, 0),
428 			      USB_VENDOR_REQUEST_GET_STATS,
429 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
430 			      0,
431 			      0,
432 			      (void *)stats,
433 			      sizeof(*stats),
434 			      USB_CTRL_GET_TIMEOUT);
435 	if (likely(ret >= 0)) {
436 		src = (u32 *)stats;
437 		dst = (u32 *)data;
438 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
439 			le32_to_cpus(&src[i]);
440 			dst[i] = src[i];
441 		}
442 	} else {
443 		netdev_warn(dev->net,
444 			    "Failed to read stats: ret = %d", ret);
445 	}
446 
447 	kfree(stats);
448 
449 	return ret;
450 }
451 
452 #define check_counter_rollover(struct1, dev_stats, member) do {	\
453 	if (struct1->member < dev_stats.saved.member)		\
454 		dev_stats.rollover_count.member++;		\
455 } while (0)
456 
457 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
458 					struct lan78xx_statstage *stats)
459 {
460 	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
461 	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
462 	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
463 	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
464 	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
465 	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
466 	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
467 	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
468 	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
469 	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
470 	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
471 	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
472 	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
473 	check_counter_rollover(stats, dev->stats, rx_pause_frames);
474 	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
475 	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
476 	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
477 	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
478 	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
479 	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
480 	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
481 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
482 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
483 	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
484 	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
485 	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
486 	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
487 	check_counter_rollover(stats, dev->stats, tx_single_collisions);
488 	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
489 	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
490 	check_counter_rollover(stats, dev->stats, tx_late_collisions);
491 	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
492 	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
493 	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
494 	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
495 	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
496 	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
497 	check_counter_rollover(stats, dev->stats, tx_pause_frames);
498 	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
499 	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
500 	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
501 	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
502 	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
503 	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
504 	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
505 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
506 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
507 
508 	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
509 }
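
/* The hardware counters are 32 bits wide and wrap; lan78xx_update_stats()
 * below reconstructs 64-bit totals as
 *
 *	curr = hw_value + rollover_count * (rollover_max + 1)
 *
 * Worked example, assuming rollover_max = 0xFFFFFFFF: after one observed
 * wrap, a raw reading of 5 yields 5 + 1 * 0x100000000 = 0x100000005.
 */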
510 
511 static void lan78xx_update_stats(struct lan78xx_net *dev)
512 {
513 	u32 *p, *count, *max;
514 	u64 *data;
515 	int i;
516 	struct lan78xx_statstage lan78xx_stats;
517 
518 	if (usb_autopm_get_interface(dev->intf) < 0)
519 		return;
520 
521 	p = (u32 *)&lan78xx_stats;
522 	count = (u32 *)&dev->stats.rollover_count;
523 	max = (u32 *)&dev->stats.rollover_max;
524 	data = (u64 *)&dev->stats.curr_stat;
525 
526 	mutex_lock(&dev->stats.access_lock);
527 
528 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
529 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
530 
531 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
532 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
533 
534 	mutex_unlock(&dev->stats.access_lock);
535 
536 	usb_autopm_put_interface(dev->intf);
537 }
538 
539 /* Loop until the read completes or times out; called with phy_mutex held */
540 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
541 {
542 	unsigned long start_time = jiffies;
543 	u32 val;
544 	int ret;
545 
546 	do {
547 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
548 		if (unlikely(ret < 0))
549 			return -EIO;
550 
551 		if (!(val & MII_ACC_MII_BUSY_))
552 			return 0;
553 	} while (!time_after(jiffies, start_time + HZ));
554 
555 	return -EIO;
556 }
557 
558 static inline u32 mii_access(int id, int index, int read)
559 {
560 	u32 ret;
561 
562 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
563 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
564 	if (read)
565 		ret |= MII_ACC_MII_READ_;
566 	else
567 		ret |= MII_ACC_MII_WRITE_;
568 	ret |= MII_ACC_MII_BUSY_;
569 
570 	return ret;
571 }
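
/* Example: mii_access(1, MII_BMSR, MII_READ) builds a command word that
 * selects PHY address 1 and register 1 (BMSR), sets the read direction,
 * and sets MII_ACC_MII_BUSY_, so writing it to MII_ACC starts the cycle;
 * the busy bit clears again once the transaction completes (polled by
 * lan78xx_phy_wait_not_busy() above).
 */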
572 
573 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
574 {
575 	unsigned long start_time = jiffies;
576 	u32 val;
577 	int ret;
578 
579 	do {
580 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
581 		if (unlikely(ret < 0))
582 			return -EIO;
583 
584 		if (!(val & E2P_CMD_EPC_BUSY_) ||
585 		    (val & E2P_CMD_EPC_TIMEOUT_))
586 			break;
587 		usleep_range(40, 100);
588 	} while (!time_after(jiffies, start_time + HZ));
589 
590 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
591 		netdev_warn(dev->net, "EEPROM operation timed out");
592 		return -EIO;
593 	}
594 
595 	return 0;
596 }
597 
598 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
599 {
600 	unsigned long start_time = jiffies;
601 	u32 val;
602 	int ret;
603 
604 	do {
605 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
606 		if (unlikely(ret < 0))
607 			return -EIO;
608 
609 		if (!(val & E2P_CMD_EPC_BUSY_))
610 			return 0;
611 
612 		usleep_range(40, 100);
613 	} while (!time_after(jiffies, start_time + HZ));
614 
615 	netdev_warn(dev->net, "EEPROM is busy");
616 	return -EIO;
617 }
618 
619 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
620 				   u32 length, u8 *data)
621 {
622 	u32 val;
623 	u32 saved;
624 	int i, ret;
625 	int retval;
626 
627 	/* On some chips, EEPROM pins are muxed with the LED function;
628 	 * disable the LED function to access the EEPROM, then restore it.
629 	 */
630 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
631 	saved = val;
632 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
633 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
634 		ret = lan78xx_write_reg(dev, HW_CFG, val);
635 	}
636 
637 	retval = lan78xx_eeprom_confirm_not_busy(dev);
638 	if (retval)
639 		return retval;
640 
641 	for (i = 0; i < length; i++) {
642 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
643 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
644 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
645 		if (unlikely(ret < 0)) {
646 			retval = -EIO;
647 			goto exit;
648 		}
649 
650 		retval = lan78xx_wait_eeprom(dev);
651 		if (retval < 0)
652 			goto exit;
653 
654 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
655 		if (unlikely(ret < 0)) {
656 			retval = -EIO;
657 			goto exit;
658 		}
659 
660 		data[i] = val & 0xFF;
661 		offset++;
662 	}
663 
664 	retval = 0;
665 exit:
666 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
667 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
668 
669 	return retval;
670 }
671 
672 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
673 			       u32 length, u8 *data)
674 {
675 	u8 sig;
676 	int ret;
677 
678 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
679 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
680 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
681 	else
682 		ret = -EINVAL;
683 
684 	return ret;
685 }
686 
687 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
688 				    u32 length, u8 *data)
689 {
690 	u32 val;
691 	u32 saved;
692 	int i, ret;
693 	int retval;
694 
695 	/* On some chips, EEPROM pins are muxed with the LED function;
696 	 * disable the LED function to access the EEPROM, then restore it.
697 	 */
698 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
699 	saved = val;
700 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
701 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
702 		ret = lan78xx_write_reg(dev, HW_CFG, val);
703 	}
704 
705 	retval = lan78xx_eeprom_confirm_not_busy(dev);
706 	if (retval)
707 		goto exit;
708 
709 	/* Issue write/erase enable command */
710 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
711 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
712 	if (unlikely(ret < 0)) {
713 		retval = -EIO;
714 		goto exit;
715 	}
716 
717 	retval = lan78xx_wait_eeprom(dev);
718 	if (retval < 0)
719 		goto exit;
720 
721 	for (i = 0; i < length; i++) {
722 		/* Fill data register */
723 		val = data[i];
724 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
725 		if (ret < 0) {
726 			retval = -EIO;
727 			goto exit;
728 		}
729 
730 		/* Send "write" command */
731 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
732 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
733 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
734 		if (ret < 0) {
735 			retval = -EIO;
736 			goto exit;
737 		}
738 
739 		retval = lan78xx_wait_eeprom(dev);
740 		if (retval < 0)
741 			goto exit;
742 
743 		offset++;
744 	}
745 
746 	retval = 0;
747 exit:
748 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
749 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
750 
751 	return retval;
752 }
753 
754 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
755 				u32 length, u8 *data)
756 {
757 	int i;
758 	int ret;
759 	u32 buf;
760 	unsigned long timeout;
761 
762 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
763 
764 	if (buf & OTP_PWR_DN_PWRDN_N_) {
765 		/* clear it and wait to be cleared */
766 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
767 
768 		timeout = jiffies + HZ;
769 		do {
770 			usleep_range(1, 10);
771 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
772 			if (time_after(jiffies, timeout)) {
773 				netdev_warn(dev->net,
774 					    "timeout on OTP_PWR_DN");
775 				return -EIO;
776 			}
777 		} while (buf & OTP_PWR_DN_PWRDN_N_);
778 	}
779 
780 	for (i = 0; i < length; i++) {
781 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
782 					((offset + i) >> 8) & OTP_ADDR1_15_11);
783 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
784 					((offset + i) & OTP_ADDR2_10_3));
785 
786 		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
787 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
788 
789 		timeout = jiffies + HZ;
790 		do {
791 			udelay(1);
792 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
793 			if (time_after(jiffies, timeout)) {
794 				netdev_warn(dev->net,
795 					    "timeout on OTP_STATUS");
796 				return -EIO;
797 			}
798 		} while (buf & OTP_STATUS_BUSY_);
799 
800 		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
801 
802 		data[i] = (u8)(buf & 0xFF);
803 	}
804 
805 	return 0;
806 }
807 
808 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
809 				 u32 length, u8 *data)
810 {
811 	int i;
812 	int ret;
813 	u32 buf;
814 	unsigned long timeout;
815 
816 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
817 
818 	if (buf & OTP_PWR_DN_PWRDN_N_) {
819 		/* clear it and wait to be cleared */
820 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
821 
822 		timeout = jiffies + HZ;
823 		do {
824 			udelay(1);
825 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
826 			if (time_after(jiffies, timeout)) {
827 				netdev_warn(dev->net,
828 					    "timeout on OTP_PWR_DN completion");
829 				return -EIO;
830 			}
831 		} while (buf & OTP_PWR_DN_PWRDN_N_);
832 	}
833 
834 	/* set to BYTE program mode */
835 	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
836 
837 	for (i = 0; i < length; i++) {
838 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
839 					((offset + i) >> 8) & OTP_ADDR1_15_11);
840 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
841 					((offset + i) & OTP_ADDR2_10_3));
842 		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
843 		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
844 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
845 
846 		timeout = jiffies + HZ;
847 		do {
848 			udelay(1);
849 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
850 			if (time_after(jiffies, timeout)) {
851 				netdev_warn(dev->net,
852 					    "Timeout on OTP_STATUS completion");
853 				return -EIO;
854 			}
855 		} while (buf & OTP_STATUS_BUSY_);
856 	}
857 
858 	return 0;
859 }
860 
861 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
862 			    u32 length, u8 *data)
863 {
864 	u8 sig;
865 	int ret;
866 
867 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
868 
869 	if (ret == 0) {
870 		if (sig == OTP_INDICATOR_2)
871 			offset += 0x100;
872 		else if (sig != OTP_INDICATOR_1)
873 			ret = -EINVAL;
874 
875 		if (ret == 0)
876 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
877 	}
878 
879 	return ret;
880 }
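
/* The indicator byte at OTP offset 0 selects the live image:
 * OTP_INDICATOR_1 (0xF3) means the image starts at offset 0, while
 * OTP_INDICATOR_2 (0xF7) apparently marks a reprogrammed image in the
 * second 256-byte area, hence the offset += 0x100 adjustment above.
 */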
881 
882 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
883 {
884 	int i, ret;
885 
886 	for (i = 0; i < 100; i++) {
887 		u32 dp_sel;
888 
889 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
890 		if (unlikely(ret < 0))
891 			return -EIO;
892 
893 		if (dp_sel & DP_SEL_DPRDY_)
894 			return 0;
895 
896 		usleep_range(40, 100);
897 	}
898 
899 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
900 
901 	return -EIO;
902 }
903 
904 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
905 				  u32 addr, u32 length, u32 *buf)
906 {
907 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
908 	u32 dp_sel;
909 	int i, ret;
910 
911 	if (usb_autopm_get_interface(dev->intf) < 0)
912 		return 0;
913 
914 	mutex_lock(&pdata->dataport_mutex);
915 
916 	ret = lan78xx_dataport_wait_not_busy(dev);
917 	if (ret < 0)
918 		goto done;
919 
920 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
921 
922 	dp_sel &= ~DP_SEL_RSEL_MASK_;
923 	dp_sel |= ram_select;
924 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
925 
926 	for (i = 0; i < length; i++) {
927 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
928 
929 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
930 
931 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
932 
933 		ret = lan78xx_dataport_wait_not_busy(dev);
934 		if (ret < 0)
935 			goto done;
936 	}
937 
938 done:
939 	mutex_unlock(&pdata->dataport_mutex);
940 	usb_autopm_put_interface(dev->intf);
941 
942 	return ret;
943 }
944 
945 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
946 				    int index, u8 addr[ETH_ALEN])
947 {
948 	u32	temp;
949 
950 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
951 		temp = addr[3];
952 		temp = addr[2] | (temp << 8);
953 		temp = addr[1] | (temp << 8);
954 		temp = addr[0] | (temp << 8);
955 		pdata->pfilter_table[index][1] = temp;
956 		temp = addr[5];
957 		temp = addr[4] | (temp << 8);
958 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
959 		pdata->pfilter_table[index][0] = temp;
960 	}
961 }
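
/* Byte-packing example for the perfect-filter entry above: for the MAC
 * address 00:11:22:33:44:55, pfilter_table[index][1] (the MAF_LO image)
 * becomes 0x33221100 and pfilter_table[index][0] (the MAF_HI image)
 * holds 0x5544 OR'ed with MAF_HI_VALID_ | MAF_HI_TYPE_DST_.
 */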
962 
963 /* returns hash bit number for given MAC address */
964 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
965 {
966 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
967 }
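
/* Example: a hash value of 0x123 (= 291) selects bit 291 % 32 = 3 of
 * mchash_table[291 / 32] = mchash_table[9]; this is how
 * lan78xx_set_multicast() below marks addresses beyond the 32 perfect
 * filter slots.
 */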
968 
969 static void lan78xx_deferred_multicast_write(struct work_struct *param)
970 {
971 	struct lan78xx_priv *pdata =
972 			container_of(param, struct lan78xx_priv, set_multicast);
973 	struct lan78xx_net *dev = pdata->dev;
974 	int i;
975 	int ret;
976 
977 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
978 		  pdata->rfe_ctl);
979 
980 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
981 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
982 
983 	for (i = 1; i < NUM_OF_MAF; i++) {
984 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
985 		ret = lan78xx_write_reg(dev, MAF_LO(i),
986 					pdata->pfilter_table[i][1]);
987 		ret = lan78xx_write_reg(dev, MAF_HI(i),
988 					pdata->pfilter_table[i][0]);
989 	}
990 
991 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
992 }
993 
994 static void lan78xx_set_multicast(struct net_device *netdev)
995 {
996 	struct lan78xx_net *dev = netdev_priv(netdev);
997 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
998 	unsigned long flags;
999 	int i;
1000 
1001 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1002 
1003 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1004 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1005 
1006 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1007 		pdata->mchash_table[i] = 0;
1008 	/* pfilter_table[0] holds the device's own MAC address */
1009 	for (i = 1; i < NUM_OF_MAF; i++) {
1010 		pdata->pfilter_table[i][0] = 0;
1011 		pdata->pfilter_table[i][1] = 0;
1012 	}
1013 
1014 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1015 
1016 	if (dev->net->flags & IFF_PROMISC) {
1017 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1018 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1019 	} else {
1020 		if (dev->net->flags & IFF_ALLMULTI) {
1021 			netif_dbg(dev, drv, dev->net,
1022 				  "receive all multicast enabled");
1023 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1024 		}
1025 	}
1026 
1027 	if (netdev_mc_count(dev->net)) {
1028 		struct netdev_hw_addr *ha;
1029 		int i;
1030 
1031 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1032 
1033 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1034 
1035 		i = 1;
1036 		netdev_for_each_mc_addr(ha, netdev) {
1037 			/* set first 32 into Perfect Filter */
1038 			if (i < 33) {
1039 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1040 			} else {
1041 				u32 bitnum = lan78xx_hash(ha->addr);
1042 
1043 				pdata->mchash_table[bitnum / 32] |=
1044 							(1 << (bitnum % 32));
1045 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1046 			}
1047 			i++;
1048 		}
1049 	}
1050 
1051 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1052 
1053 	/* defer register writes to a sleepable context */
1054 	schedule_work(&pdata->set_multicast);
1055 }
1056 
1057 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1058 				      u16 lcladv, u16 rmtadv)
1059 {
1060 	u32 flow = 0, fct_flow = 0;
1061 	int ret;
1062 	u8 cap;
1063 
1064 	if (dev->fc_autoneg)
1065 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1066 	else
1067 		cap = dev->fc_request_control;
1068 
1069 	if (cap & FLOW_CTRL_TX)
1070 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1071 
1072 	if (cap & FLOW_CTRL_RX)
1073 		flow |= FLOW_CR_RX_FCEN_;
1074 
1075 	if (dev->udev->speed == USB_SPEED_SUPER)
1076 		fct_flow = 0x817;
1077 	else if (dev->udev->speed == USB_SPEED_HIGH)
1078 		fct_flow = 0x211;
1079 
1080 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1081 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1082 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1083 
1084 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1085 
1086 	/* threshold value should be set before enabling flow */
1087 	ret = lan78xx_write_reg(dev, FLOW, flow);
1088 
1089 	return 0;
1090 }
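
/* Register-layout notes for the above, inferred from the code rather than
 * a datasheet: the low 16 bits of FLOW appear to be the pause-time quanta
 * (hence OR-ing in the 0xFFFF maximum alongside FLOW_CR_TX_FCEN_), and the
 * FCT_FLOW values 0x817 / 0x211 are taken as given as the speed-specific
 * FIFO flow-control thresholds for SuperSpeed and high speed.
 */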
1091 
1092 static int lan78xx_link_reset(struct lan78xx_net *dev)
1093 {
1094 	struct phy_device *phydev = dev->net->phydev;
1095 	struct ethtool_link_ksettings ecmd;
1096 	int ladv, radv, ret;
1097 	u32 buf;
1098 
1099 	/* clear PHY interrupt status */
1100 	ret = phy_read(phydev, LAN88XX_INT_STS);
1101 	if (unlikely(ret < 0))
1102 		return -EIO;
1103 
1104 	/* clear LAN78xx interrupt status */
1105 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1106 	if (unlikely(ret < 0))
1107 		return -EIO;
1108 
1109 	phy_read_status(phydev);
1110 
1111 	if (!phydev->link && dev->link_on) {
1112 		dev->link_on = false;
1113 
1114 		/* reset MAC */
1115 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1116 		if (unlikely(ret < 0))
1117 			return -EIO;
1118 		buf |= MAC_CR_RST_;
1119 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1120 		if (unlikely(ret < 0))
1121 			return -EIO;
1122 
1123 		phy_mac_interrupt(phydev, 0);
1124 
1125 		del_timer(&dev->stat_monitor);
1126 	} else if (phydev->link && !dev->link_on) {
1127 		dev->link_on = true;
1128 
1129 		phy_ethtool_ksettings_get(phydev, &ecmd);
1130 
1131 		ret = phy_read(phydev, LAN88XX_INT_STS);
1132 
1133 		if (dev->udev->speed == USB_SPEED_SUPER) {
1134 			if (ecmd.base.speed == SPEED_1000) {
1135 				/* disable U2 */
1136 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1137 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1138 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1139 				/* enable U1 */
1140 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1141 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1142 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1143 			} else {
1144 				/* enable U1 & U2 */
1145 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1146 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1147 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1148 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1149 			}
1150 		}
1151 
1152 		ladv = phy_read(phydev, MII_ADVERTISE);
1153 		if (ladv < 0)
1154 			return ladv;
1155 
1156 		radv = phy_read(phydev, MII_LPA);
1157 		if (radv < 0)
1158 			return radv;
1159 
1160 		netif_dbg(dev, link, dev->net,
1161 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1162 			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1163 
1164 		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1165 						 radv);
1166 		phy_mac_interrupt(phydev, 1);
1167 
1168 		if (!timer_pending(&dev->stat_monitor)) {
1169 			dev->delta = 1;
1170 			mod_timer(&dev->stat_monitor,
1171 				  jiffies + STAT_UPDATE_TIMER);
1172 		}
1173 	}
1174 
1175 	return ret;
1176 }
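
/* U1/U2 above are USB 3.0 low-power link states; presumably U2 (the
 * deeper state, with the longer exit latency) is disabled at 1000 Mbps so
 * that link-resume latency cannot throttle gigabit traffic, while both
 * states remain acceptable at lower link speeds.
 */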
1177 
1178 /* Some work can't be done in tasklets, so keventd is used instead.
1179  *
1180  * NOTE the asymmetry: if the work is already queued, schedule_work()
1181  * fails, but tasklet_schedule() doesn't; the failure should be rare.
1182  */
1183 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1184 {
1185 	set_bit(work, &dev->flags);
1186 	if (!schedule_delayed_work(&dev->wq, 0))
1187 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1188 }
1189 
1190 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1191 {
1192 	u32 intdata;
1193 
1194 	if (urb->actual_length != 4) {
1195 		netdev_warn(dev->net,
1196 			    "unexpected urb length %d", urb->actual_length);
1197 		return;
1198 	}
1199 
1200 	memcpy(&intdata, urb->transfer_buffer, 4);
1201 	le32_to_cpus(&intdata);
1202 
1203 	if (intdata & INT_ENP_PHY_INT) {
1204 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1205 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1206 	} else {
1207 		netdev_warn(dev->net, "unexpected interrupt: 0x%08x\n", intdata);
1208 	}
1209 }
1210 
1211 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1212 {
1213 	return MAX_EEPROM_SIZE;
1214 }
1215 
1216 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1217 				      struct ethtool_eeprom *ee, u8 *data)
1218 {
1219 	struct lan78xx_net *dev = netdev_priv(netdev);
1220 
1221 	ee->magic = LAN78XX_EEPROM_MAGIC;
1222 
1223 	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1224 }
1225 
1226 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1227 				      struct ethtool_eeprom *ee, u8 *data)
1228 {
1229 	struct lan78xx_net *dev = netdev_priv(netdev);
1230 
1231 	/* Allow entire EEPROM or OTP update only */
1232 	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1233 	    (ee->offset == 0) &&
1234 	    (ee->len == 512) &&
1235 	    (data[0] == EEPROM_INDICATOR))
1236 		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1237 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1238 		 (ee->offset == 0) &&
1239 		 (ee->len == 512) &&
1240 		 (data[0] == OTP_INDICATOR_1))
1241 		return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1242 
1243 	return -EINVAL;
1244 }
1245 
1246 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1247 				u8 *data)
1248 {
1249 	if (stringset == ETH_SS_STATS)
1250 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1251 }
1252 
1253 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1254 {
1255 	if (sset == ETH_SS_STATS)
1256 		return ARRAY_SIZE(lan78xx_gstrings);
1257 	else
1258 		return -EOPNOTSUPP;
1259 }
1260 
1261 static void lan78xx_get_stats(struct net_device *netdev,
1262 			      struct ethtool_stats *stats, u64 *data)
1263 {
1264 	struct lan78xx_net *dev = netdev_priv(netdev);
1265 
1266 	lan78xx_update_stats(dev);
1267 
1268 	mutex_lock(&dev->stats.access_lock);
1269 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1270 	mutex_unlock(&dev->stats.access_lock);
1271 }
1272 
1273 static void lan78xx_get_wol(struct net_device *netdev,
1274 			    struct ethtool_wolinfo *wol)
1275 {
1276 	struct lan78xx_net *dev = netdev_priv(netdev);
1277 	int ret;
1278 	u32 buf;
1279 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1280 
1281 	if (usb_autopm_get_interface(dev->intf) < 0)
1282 		return;
1283 
1284 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1285 	if (unlikely(ret < 0)) {
1286 		wol->supported = 0;
1287 		wol->wolopts = 0;
1288 	} else {
1289 		if (buf & USB_CFG_RMT_WKP_) {
1290 			wol->supported = WAKE_ALL;
1291 			wol->wolopts = pdata->wol;
1292 		} else {
1293 			wol->supported = 0;
1294 			wol->wolopts = 0;
1295 		}
1296 	}
1297 
1298 	usb_autopm_put_interface(dev->intf);
1299 }
1300 
1301 static int lan78xx_set_wol(struct net_device *netdev,
1302 			   struct ethtool_wolinfo *wol)
1303 {
1304 	struct lan78xx_net *dev = netdev_priv(netdev);
1305 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1306 	int ret;
1307 
1308 	ret = usb_autopm_get_interface(dev->intf);
1309 	if (ret < 0)
1310 		return ret;
1311 
1312 	pdata->wol = 0;
1313 	if (wol->wolopts & WAKE_UCAST)
1314 		pdata->wol |= WAKE_UCAST;
1315 	if (wol->wolopts & WAKE_MCAST)
1316 		pdata->wol |= WAKE_MCAST;
1317 	if (wol->wolopts & WAKE_BCAST)
1318 		pdata->wol |= WAKE_BCAST;
1319 	if (wol->wolopts & WAKE_MAGIC)
1320 		pdata->wol |= WAKE_MAGIC;
1321 	if (wol->wolopts & WAKE_PHY)
1322 		pdata->wol |= WAKE_PHY;
1323 	if (wol->wolopts & WAKE_ARP)
1324 		pdata->wol |= WAKE_ARP;
1325 
1326 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1327 
1328 	phy_ethtool_set_wol(netdev->phydev, wol);
1329 
1330 	usb_autopm_put_interface(dev->intf);
1331 
1332 	return ret;
1333 }
1334 
1335 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1336 {
1337 	struct lan78xx_net *dev = netdev_priv(net);
1338 	struct phy_device *phydev = net->phydev;
1339 	int ret;
1340 	u32 buf;
1341 
1342 	ret = usb_autopm_get_interface(dev->intf);
1343 	if (ret < 0)
1344 		return ret;
1345 
1346 	ret = phy_ethtool_get_eee(phydev, edata);
1347 	if (ret < 0)
1348 		goto exit;
1349 
1350 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1351 	if (buf & MAC_CR_EEE_EN_) {
1352 		edata->eee_enabled = true;
1353 		edata->eee_active = !!(edata->advertised &
1354 				       edata->lp_advertised);
1355 		edata->tx_lpi_enabled = true;
1356 		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer are in the same usec units */
1357 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1358 		edata->tx_lpi_timer = buf;
1359 	} else {
1360 		edata->eee_enabled = false;
1361 		edata->eee_active = false;
1362 		edata->tx_lpi_enabled = false;
1363 		edata->tx_lpi_timer = 0;
1364 	}
1365 
1366 	ret = 0;
1367 exit:
1368 	usb_autopm_put_interface(dev->intf);
1369 
1370 	return ret;
1371 }
1372 
1373 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1374 {
1375 	struct lan78xx_net *dev = netdev_priv(net);
1376 	int ret;
1377 	u32 buf;
1378 
1379 	ret = usb_autopm_get_interface(dev->intf);
1380 	if (ret < 0)
1381 		return ret;
1382 
1383 	if (edata->eee_enabled) {
1384 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1385 		buf |= MAC_CR_EEE_EN_;
1386 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1387 
1388 		phy_ethtool_set_eee(net->phydev, edata);
1389 
1390 		buf = (u32)edata->tx_lpi_timer;
1391 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1392 	} else {
1393 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1394 		buf &= ~MAC_CR_EEE_EN_;
1395 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1396 	}
1397 
1398 	usb_autopm_put_interface(dev->intf);
1399 
1400 	return 0;
1401 }
1402 
1403 static u32 lan78xx_get_link(struct net_device *net)
1404 {
1405 	phy_read_status(net->phydev);
1406 
1407 	return net->phydev->link;
1408 }
1409 
1410 static int lan78xx_nway_reset(struct net_device *net)
1411 {
1412 	return phy_start_aneg(net->phydev);
1413 }
1414 
1415 static void lan78xx_get_drvinfo(struct net_device *net,
1416 				struct ethtool_drvinfo *info)
1417 {
1418 	struct lan78xx_net *dev = netdev_priv(net);
1419 
1420 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1421 	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1422 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1423 }
1424 
1425 static u32 lan78xx_get_msglevel(struct net_device *net)
1426 {
1427 	struct lan78xx_net *dev = netdev_priv(net);
1428 
1429 	return dev->msg_enable;
1430 }
1431 
1432 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1433 {
1434 	struct lan78xx_net *dev = netdev_priv(net);
1435 
1436 	dev->msg_enable = level;
1437 }
1438 
1439 static int lan78xx_get_mdix_status(struct net_device *net)
1440 {
1441 	struct phy_device *phydev = net->phydev;
1442 	int buf;
1443 
1444 	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1445 	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1446 	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1447 
1448 	return buf;
1449 }
1450 
1451 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1452 {
1453 	struct lan78xx_net *dev = netdev_priv(net);
1454 	struct phy_device *phydev = net->phydev;
1455 	int buf;
1456 
1457 	if (mdix_ctrl == ETH_TP_MDI) {
1458 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1459 			  LAN88XX_EXT_PAGE_SPACE_1);
1460 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1461 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1462 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1463 			  buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1464 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1465 			  LAN88XX_EXT_PAGE_SPACE_0);
1466 	} else if (mdix_ctrl == ETH_TP_MDI_X) {
1467 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1468 			  LAN88XX_EXT_PAGE_SPACE_1);
1469 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1470 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1471 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1472 			  buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1473 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1474 			  LAN88XX_EXT_PAGE_SPACE_0);
1475 	} else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1476 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1477 			  LAN88XX_EXT_PAGE_SPACE_1);
1478 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1479 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1480 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1481 			  buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1482 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1483 			  LAN88XX_EXT_PAGE_SPACE_0);
1484 	}
1485 	dev->mdix_ctrl = mdix_ctrl;
1486 }
1487 
1488 static int lan78xx_get_link_ksettings(struct net_device *net,
1489 				      struct ethtool_link_ksettings *cmd)
1490 {
1491 	struct lan78xx_net *dev = netdev_priv(net);
1492 	struct phy_device *phydev = net->phydev;
1493 	int ret;
1494 	int buf;
1495 
1496 	ret = usb_autopm_get_interface(dev->intf);
1497 	if (ret < 0)
1498 		return ret;
1499 
1500 	ret = phy_ethtool_ksettings_get(phydev, cmd);
1501 
1502 	buf = lan78xx_get_mdix_status(net);
1503 
1504 	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1505 	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1506 		cmd->base.eth_tp_mdix = ETH_TP_MDI_AUTO;
1507 		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1508 	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1509 		cmd->base.eth_tp_mdix = ETH_TP_MDI;
1510 		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI;
1511 	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1512 		cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
1513 		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1514 	}
1515 
1516 	usb_autopm_put_interface(dev->intf);
1517 
1518 	return ret;
1519 }
1520 
1521 static int lan78xx_set_link_ksettings(struct net_device *net,
1522 				      const struct ethtool_link_ksettings *cmd)
1523 {
1524 	struct lan78xx_net *dev = netdev_priv(net);
1525 	struct phy_device *phydev = net->phydev;
1526 	int ret = 0;
1527 	int temp;
1528 
1529 	ret = usb_autopm_get_interface(dev->intf);
1530 	if (ret < 0)
1531 		return ret;
1532 
1533 	if (dev->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl)
1534 		lan78xx_set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl);
1535 
1536 	/* change speed & duplex */
1537 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1538 
1539 	if (!cmd->base.autoneg) {
1540 		/* force link down */
1541 		temp = phy_read(phydev, MII_BMCR);
1542 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1543 		mdelay(1);
1544 		phy_write(phydev, MII_BMCR, temp);
1545 	}
1546 
1547 	usb_autopm_put_interface(dev->intf);
1548 
1549 	return ret;
1550 }
1551 
1552 static void lan78xx_get_pause(struct net_device *net,
1553 			      struct ethtool_pauseparam *pause)
1554 {
1555 	struct lan78xx_net *dev = netdev_priv(net);
1556 	struct phy_device *phydev = net->phydev;
1557 	struct ethtool_link_ksettings ecmd;
1558 
1559 	phy_ethtool_ksettings_get(phydev, &ecmd);
1560 
1561 	pause->autoneg = dev->fc_autoneg;
1562 
1563 	if (dev->fc_request_control & FLOW_CTRL_TX)
1564 		pause->tx_pause = 1;
1565 
1566 	if (dev->fc_request_control & FLOW_CTRL_RX)
1567 		pause->rx_pause = 1;
1568 }
1569 
1570 static int lan78xx_set_pause(struct net_device *net,
1571 			     struct ethtool_pauseparam *pause)
1572 {
1573 	struct lan78xx_net *dev = netdev_priv(net);
1574 	struct phy_device *phydev = net->phydev;
1575 	struct ethtool_link_ksettings ecmd;
1576 	int ret;
1577 
1578 	phy_ethtool_ksettings_get(phydev, &ecmd);
1579 
1580 	if (pause->autoneg && !ecmd.base.autoneg) {
1581 		ret = -EINVAL;
1582 		goto exit;
1583 	}
1584 
1585 	dev->fc_request_control = 0;
1586 	if (pause->rx_pause)
1587 		dev->fc_request_control |= FLOW_CTRL_RX;
1588 
1589 	if (pause->tx_pause)
1590 		dev->fc_request_control |= FLOW_CTRL_TX;
1591 
1592 	if (ecmd.base.autoneg) {
1593 		u32 mii_adv;
1594 		u32 advertising;
1595 
1596 		ethtool_convert_link_mode_to_legacy_u32(
1597 			&advertising, ecmd.link_modes.advertising);
1598 
1599 		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1600 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1601 		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1602 
1603 		ethtool_convert_legacy_u32_to_link_mode(
1604 			ecmd.link_modes.advertising, advertising);
1605 
1606 		phy_ethtool_ksettings_set(phydev, &ecmd);
1607 	}
1608 
1609 	dev->fc_autoneg = pause->autoneg;
1610 
1611 	ret = 0;
1612 exit:
1613 	return ret;
1614 }
1615 
1616 static const struct ethtool_ops lan78xx_ethtool_ops = {
1617 	.get_link	= lan78xx_get_link,
1618 	.nway_reset	= lan78xx_nway_reset,
1619 	.get_drvinfo	= lan78xx_get_drvinfo,
1620 	.get_msglevel	= lan78xx_get_msglevel,
1621 	.set_msglevel	= lan78xx_set_msglevel,
1622 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1623 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1624 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1625 	.get_ethtool_stats = lan78xx_get_stats,
1626 	.get_sset_count = lan78xx_get_sset_count,
1627 	.get_strings	= lan78xx_get_strings,
1628 	.get_wol	= lan78xx_get_wol,
1629 	.set_wol	= lan78xx_set_wol,
1630 	.get_eee	= lan78xx_get_eee,
1631 	.set_eee	= lan78xx_set_eee,
1632 	.get_pauseparam	= lan78xx_get_pause,
1633 	.set_pauseparam	= lan78xx_set_pause,
1634 	.get_link_ksettings = lan78xx_get_link_ksettings,
1635 	.set_link_ksettings = lan78xx_set_link_ksettings,
1636 };
1637 
1638 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1639 {
1640 	if (!netif_running(netdev))
1641 		return -EINVAL;
1642 
1643 	return phy_mii_ioctl(netdev->phydev, rq, cmd);
1644 }
1645 
1646 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1647 {
1648 	u32 addr_lo, addr_hi;
1649 	int ret;
1650 	u8 addr[6];
1651 
1652 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1653 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1654 
1655 	addr[0] = addr_lo & 0xFF;
1656 	addr[1] = (addr_lo >> 8) & 0xFF;
1657 	addr[2] = (addr_lo >> 16) & 0xFF;
1658 	addr[3] = (addr_lo >> 24) & 0xFF;
1659 	addr[4] = addr_hi & 0xFF;
1660 	addr[5] = (addr_hi >> 8) & 0xFF;
1661 
1662 	if (!is_valid_ether_addr(addr)) {
1663 		/* read the MAC address from EEPROM or OTP */
1664 		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1665 					 addr) == 0) ||
1666 		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1667 				      addr) == 0)) {
1668 			if (is_valid_ether_addr(addr)) {
1669 				/* eeprom values are valid so use them */
1670 				netif_dbg(dev, ifup, dev->net,
1671 					  "MAC address read from EEPROM");
1672 			} else {
1673 				/* generate random MAC */
1674 				random_ether_addr(addr);
1675 				netif_dbg(dev, ifup, dev->net,
1676 					  "MAC address set to random addr");
1677 			}
1678 		} else {
1679 			/* generate random MAC */
1680 			random_ether_addr(addr);
1681 			netif_dbg(dev, ifup, dev->net,
1682 				  "MAC address set to random addr");
1683 		}
1684 
1685 		addr_lo = addr[0] | (addr[1] << 8) |
1686 			  (addr[2] << 16) | (addr[3] << 24);
1687 		addr_hi = addr[4] | (addr[5] << 8);
1688 
1689 		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1690 		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1691 	}
1692 
1693 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1694 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1695 
1696 	ether_addr_copy(dev->net->dev_addr, addr);
1697 }
1698 
1699 /* MDIO read and write wrappers for phylib */
1700 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1701 {
1702 	struct lan78xx_net *dev = bus->priv;
1703 	u32 val, addr;
1704 	int ret;
1705 
1706 	ret = usb_autopm_get_interface(dev->intf);
1707 	if (ret < 0)
1708 		return ret;
1709 
1710 	mutex_lock(&dev->phy_mutex);
1711 
1712 	/* confirm MII not busy */
1713 	ret = lan78xx_phy_wait_not_busy(dev);
1714 	if (ret < 0)
1715 		goto done;
1716 
1717 	/* set the address, index & direction (read from PHY) */
1718 	addr = mii_access(phy_id, idx, MII_READ);
1719 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1720 
1721 	ret = lan78xx_phy_wait_not_busy(dev);
1722 	if (ret < 0)
1723 		goto done;
1724 
1725 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
1726 
1727 	ret = (int)(val & 0xFFFF);
1728 
1729 done:
1730 	mutex_unlock(&dev->phy_mutex);
1731 	usb_autopm_put_interface(dev->intf);
1732 	return ret;
1733 }
1734 
1735 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1736 				 u16 regval)
1737 {
1738 	struct lan78xx_net *dev = bus->priv;
1739 	u32 val, addr;
1740 	int ret;
1741 
1742 	ret = usb_autopm_get_interface(dev->intf);
1743 	if (ret < 0)
1744 		return ret;
1745 
1746 	mutex_lock(&dev->phy_mutex);
1747 
1748 	/* confirm MII not busy */
1749 	ret = lan78xx_phy_wait_not_busy(dev);
1750 	if (ret < 0)
1751 		goto done;
1752 
1753 	val = (u32)regval;
1754 	ret = lan78xx_write_reg(dev, MII_DATA, val);
1755 
1756 	/* set the address, index & direction (write to PHY) */
1757 	addr = mii_access(phy_id, idx, MII_WRITE);
1758 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1759 
1760 	ret = lan78xx_phy_wait_not_busy(dev);
1761 	if (ret < 0)
1762 		goto done;
1763 
1764 done:
1765 	mutex_unlock(&dev->phy_mutex);
1766 	usb_autopm_put_interface(dev->intf);
1767 	return 0;
1768 }
1769 
1770 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1771 {
1772 	int ret;
1773 
1774 	dev->mdiobus = mdiobus_alloc();
1775 	if (!dev->mdiobus) {
1776 		netdev_err(dev->net, "can't allocate MDIO bus\n");
1777 		return -ENOMEM;
1778 	}
1779 
1780 	dev->mdiobus->priv = (void *)dev;
1781 	dev->mdiobus->read = lan78xx_mdiobus_read;
1782 	dev->mdiobus->write = lan78xx_mdiobus_write;
1783 	dev->mdiobus->name = "lan78xx-mdiobus";
1784 
1785 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1786 		 dev->udev->bus->busnum, dev->udev->devnum);
1787 
1788 	switch (dev->chipid) {
1789 	case ID_REV_CHIP_ID_7800_:
1790 	case ID_REV_CHIP_ID_7850_:
1791 		/* set to internal PHY id */
1792 		dev->mdiobus->phy_mask = ~(1 << 1);
1793 		break;
1794 	}
1795 
1796 	ret = mdiobus_register(dev->mdiobus);
1797 	if (ret) {
1798 		netdev_err(dev->net, "can't register MDIO bus\n");
1799 		goto exit1;
1800 	}
1801 
1802 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1803 	return 0;
1804 exit1:
1805 	mdiobus_free(dev->mdiobus);
1806 	return ret;
1807 }
1808 
1809 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1810 {
1811 	mdiobus_unregister(dev->mdiobus);
1812 	mdiobus_free(dev->mdiobus);
1813 }
1814 
1815 static void lan78xx_link_status_change(struct net_device *net)
1816 {
1817 	struct phy_device *phydev = net->phydev;
1818 	int ret, temp;
1819 
1820 	/* In forced 100M full/half duplex mode, the chip may fail to set
1821 	 * the mode correctly when the cable is switched between a long
1822 	 * (~50m+) and a short one. As a workaround, set the speed to 10
1823 	 * before setting it to 100 while in forced 100 F/H mode.
1824 	 */
1825 	if (!phydev->autoneg && (phydev->speed == 100)) {
1826 		/* disable phy interrupt */
1827 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1828 		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1829 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1830 
1831 		temp = phy_read(phydev, MII_BMCR);
1832 		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1833 		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1834 		temp |= BMCR_SPEED100;
1835 		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1836 
1837 		/* clear any interrupt generated during the workaround */
1838 		temp = phy_read(phydev, LAN88XX_INT_STS);
1839 
1840 		/* enable phy interrupt back */
1841 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1842 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1843 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1844 	}
1845 }
1846 
1847 static int lan78xx_phy_init(struct lan78xx_net *dev)
1848 {
1849 	int ret;
1850 	u32 mii_adv;
1851 	struct phy_device *phydev;
1852 
1853 	phydev = phy_find_first(dev->mdiobus);
1854 	if (!phydev) {
1855 		netdev_err(dev->net, "no PHY found\n");
1856 		return -EIO;
1857 	}
1858 
1859 	/* Enable PHY interrupts.
1860 	 * The driver handles the PHY interrupt itself.
1861 	 */
1862 	ret = phy_read(phydev, LAN88XX_INT_STS);
1863 	ret = phy_write(phydev, LAN88XX_INT_MASK,
1864 			LAN88XX_INT_MASK_MDINTPIN_EN_ |
1865 			LAN88XX_INT_MASK_LINK_CHANGE_);
1866 
1867 	phydev->irq = PHY_IGNORE_INTERRUPT;
1868 
1869 	ret = phy_connect_direct(dev->net, phydev,
1870 				 lan78xx_link_status_change,
1871 				 PHY_INTERFACE_MODE_GMII);
1872 	if (ret) {
1873 		netdev_err(dev->net, "can't attach PHY to %s\n",
1874 			   dev->mdiobus->id);
1875 		return -EIO;
1876 	}
1877 
1878 	/* set to AUTOMDIX */
1879 	lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1880 
1881 	/* MAC doesn't support 1000T Half */
1882 	phydev->supported &= ~SUPPORTED_1000baseT_Half;
1883 
1884 	/* support both flow controls */
1885 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1886 	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1887 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1888 	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1889 
1890 	genphy_config_aneg(phydev);
1891 
1892 	dev->fc_autoneg = phydev->autoneg;
1893 
1894 	phy_start(phydev);
1895 
	netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
1897 
1898 	return 0;
1899 }
1900 
1901 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1902 {
1903 	int ret = 0;
1904 	u32 buf;
1905 	bool rxenabled;
1906 
1907 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1908 
1909 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1910 
1911 	if (rxenabled) {
1912 		buf &= ~MAC_RX_RXEN_;
1913 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
1914 	}
1915 
1916 	/* add 4 to size for FCS */
1917 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
1918 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1919 
1920 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
1921 
1922 	if (rxenabled) {
1923 		buf |= MAC_RX_RXEN_;
1924 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
1925 	}
1926 
1927 	return 0;
1928 }
1929 
1930 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
1931 {
1932 	struct sk_buff *skb;
1933 	unsigned long flags;
1934 	int count = 0;
1935 
1936 	spin_lock_irqsave(&q->lock, flags);
1937 	while (!skb_queue_empty(q)) {
1938 		struct skb_data	*entry;
1939 		struct urb *urb;
1940 		int ret;
1941 
1942 		skb_queue_walk(q, skb) {
1943 			entry = (struct skb_data *)skb->cb;
1944 			if (entry->state != unlink_start)
1945 				goto found;
1946 		}
1947 		break;
1948 found:
1949 		entry->state = unlink_start;
1950 		urb = entry->urb;
1951 
		/* Take a reference on the URB so it cannot be freed
		 * while usb_unlink_urb() runs; usb_unlink_urb() always
		 * races with the completion handler (including
		 * defer_bh), which could otherwise trigger a
		 * use-after-free.
		 */
1958 		usb_get_urb(urb);
1959 		spin_unlock_irqrestore(&q->lock, flags);
1960 		/* during some PM-driven resume scenarios,
1961 		 * these (async) unlinks complete immediately
1962 		 */
1963 		ret = usb_unlink_urb(urb);
1964 		if (ret != -EINPROGRESS && ret != 0)
1965 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
1966 		else
1967 			count++;
1968 		usb_put_urb(urb);
1969 		spin_lock_irqsave(&q->lock, flags);
1970 	}
1971 	spin_unlock_irqrestore(&q->lock, flags);
1972 	return count;
1973 }
1974 
1975 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1976 {
1977 	struct lan78xx_net *dev = netdev_priv(netdev);
1978 	int ll_mtu = new_mtu + netdev->hard_header_len;
1979 	int old_hard_mtu = dev->hard_mtu;
1980 	int old_rx_urb_size = dev->rx_urb_size;
1981 	int ret;
1982 
1983 	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1984 		return -EINVAL;
1985 
1986 	if (new_mtu <= 0)
1987 		return -EINVAL;
1988 	/* no second zero-length packet read wanted after mtu-sized packets */
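	/* with a 512-byte high-speed bulk endpoint, for instance, an
	 * ll_mtu of 1536 would end every mtu-sized read exactly on a
	 * packet boundary and cost an extra zero-length read per packet
	 */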
1989 	if ((ll_mtu % dev->maxpacket) == 0)
1990 		return -EDOM;
1991 
1992 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1993 
1994 	netdev->mtu = new_mtu;
1995 
1996 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1997 	if (dev->rx_urb_size == old_hard_mtu) {
1998 		dev->rx_urb_size = dev->hard_mtu;
1999 		if (dev->rx_urb_size > old_rx_urb_size) {
2000 			if (netif_running(dev->net)) {
2001 				unlink_urbs(dev, &dev->rxq);
2002 				tasklet_schedule(&dev->bh);
2003 			}
2004 		}
2005 	}
2006 
2007 	return 0;
2008 }
2009 
2010 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2011 {
2012 	struct lan78xx_net *dev = netdev_priv(netdev);
2013 	struct sockaddr *addr = p;
2014 	u32 addr_lo, addr_hi;
2015 	int ret;
2016 
2017 	if (netif_running(netdev))
2018 		return -EBUSY;
2019 
2020 	if (!is_valid_ether_addr(addr->sa_data))
2021 		return -EADDRNOTAVAIL;
2022 
2023 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2024 
2025 	addr_lo = netdev->dev_addr[0] |
2026 		  netdev->dev_addr[1] << 8 |
2027 		  netdev->dev_addr[2] << 16 |
2028 		  netdev->dev_addr[3] << 24;
2029 	addr_hi = netdev->dev_addr[4] |
2030 		  netdev->dev_addr[5] << 8;
2031 
2032 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2033 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2034 
2035 	return 0;
2036 }
2037 
/* Enable or disable Rx checksum offload engines and VLAN filtering */
2039 static int lan78xx_set_features(struct net_device *netdev,
2040 				netdev_features_t features)
2041 {
2042 	struct lan78xx_net *dev = netdev_priv(netdev);
2043 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2044 	unsigned long flags;
2045 	int ret;
2046 
2047 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2048 
2049 	if (features & NETIF_F_RXCSUM) {
2050 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2051 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2052 	} else {
2053 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2054 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2055 	}
2056 
2057 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2058 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2059 	else
2060 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2061 
2062 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2063 
2064 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2065 
2066 	return 0;
2067 }
2068 
2069 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2070 {
2071 	struct lan78xx_priv *pdata =
2072 			container_of(param, struct lan78xx_priv, set_vlan);
2073 	struct lan78xx_net *dev = pdata->dev;
2074 
2075 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2076 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2077 }
2078 
2079 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2080 				   __be16 proto, u16 vid)
2081 {
2082 	struct lan78xx_net *dev = netdev_priv(netdev);
2083 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2084 	u16 vid_bit_index;
2085 	u16 vid_dword_index;
2086 
2087 	vid_dword_index = (vid >> 5) & 0x7F;
2088 	vid_bit_index = vid & 0x1F;
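	/* Example: VID 291 (0x123) selects dword 291 >> 5 = 9 and bit
	 * 291 & 0x1F = 3, i.e. bit 3 of vlan_table[9].
	 */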
2089 
2090 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2091 
2092 	/* defer register writes to a sleepable context */
2093 	schedule_work(&pdata->set_vlan);
2094 
2095 	return 0;
2096 }
2097 
2098 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2099 				    __be16 proto, u16 vid)
2100 {
2101 	struct lan78xx_net *dev = netdev_priv(netdev);
2102 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2103 	u16 vid_bit_index;
2104 	u16 vid_dword_index;
2105 
2106 	vid_dword_index = (vid >> 5) & 0x7F;
2107 	vid_bit_index = vid & 0x1F;
2108 
2109 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2110 
2111 	/* defer register writes to a sleepable context */
2112 	schedule_work(&pdata->set_vlan);
2113 
2114 	return 0;
2115 }
2116 
2117 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2118 {
2119 	int ret;
2120 	u32 buf;
2121 	u32 regs[6] = { 0 };
2122 
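	/* If LTM is enabled, EEPROM/OTP offset 0x3F is expected to hold a
	 * length byte (24, i.e. six 32-bit registers) and a word pointer
	 * to a block that overrides the six LTM_* registers below;
	 * otherwise they are left at zero.
	 */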
2123 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2124 	if (buf & USB_CFG1_LTM_ENABLE_) {
2125 		u8 temp[2];
2126 		/* Get values from EEPROM first */
2127 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2128 			if (temp[0] == 24) {
2129 				ret = lan78xx_read_raw_eeprom(dev,
2130 							      temp[1] * 2,
2131 							      24,
2132 							      (u8 *)regs);
2133 				if (ret < 0)
2134 					return;
2135 			}
2136 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2137 			if (temp[0] == 24) {
2138 				ret = lan78xx_read_raw_otp(dev,
2139 							   temp[1] * 2,
2140 							   24,
2141 							   (u8 *)regs);
2142 				if (ret < 0)
2143 					return;
2144 			}
2145 		}
2146 	}
2147 
2148 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2149 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2150 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2151 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2152 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2153 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2154 }
2155 
2156 static int lan78xx_reset(struct lan78xx_net *dev)
2157 {
2158 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2159 	u32 buf;
2160 	int ret = 0;
2161 	unsigned long timeout;
2162 
2163 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2164 	buf |= HW_CFG_LRST_;
2165 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2166 
2167 	timeout = jiffies + HZ;
2168 	do {
2169 		mdelay(1);
2170 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2171 		if (time_after(jiffies, timeout)) {
2172 			netdev_warn(dev->net,
				    "timeout on completion of LiteReset\n");
2174 			return -EIO;
2175 		}
2176 	} while (buf & HW_CFG_LRST_);
2177 
2178 	lan78xx_init_mac_address(dev);
2179 
2180 	/* save DEVID for later usage */
2181 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2182 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2183 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2184 
2185 	/* Respond to the IN token with a NAK */
2186 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2187 	buf |= USB_CFG_BIR_;
2188 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2189 
2190 	/* Init LTM */
2191 	lan78xx_init_ltm(dev);
2192 
2193 	dev->net->hard_header_len += TX_OVERHEAD;
2194 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2195 
2196 	if (dev->udev->speed == USB_SPEED_SUPER) {
2197 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2198 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2199 		dev->rx_qlen = 4;
2200 		dev->tx_qlen = 4;
2201 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2202 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2203 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2204 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2205 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2206 	} else {
2207 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2208 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2209 		dev->rx_qlen = 4;
2210 	}
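	/* BURST_CAP counts packets, not bytes: the 12 KiB default cap is
	 * 12 SuperSpeed (1024-byte), 24 high-speed (512-byte) or
	 * 192 full-speed (64-byte) packets.
	 */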
2211 
2212 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2213 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2214 
2215 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2216 	buf |= HW_CFG_MEF_;
2217 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2218 
2219 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2220 	buf |= USB_CFG_BCE_;
2221 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2222 
2223 	/* set FIFO sizes */
2224 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2225 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2226 
2227 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2228 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2229 
2230 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2231 	ret = lan78xx_write_reg(dev, FLOW, 0);
2232 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2233 
2234 	/* Don't need rfe_ctl_lock during initialisation */
2235 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2236 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2237 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2238 
2239 	/* Enable or disable checksum offload engines */
2240 	lan78xx_set_features(dev->net, dev->net->features);
2241 
2242 	lan78xx_set_multicast(dev->net);
2243 
2244 	/* reset PHY */
2245 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2246 	buf |= PMT_CTL_PHY_RST_;
2247 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2248 
2249 	timeout = jiffies + HZ;
2250 	do {
2251 		mdelay(1);
2252 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2253 		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
2255 			return -EIO;
2256 		}
2257 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2258 
2259 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2260 	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2261 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2262 
2263 	/* enable PHY interrupts */
2264 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2265 	buf |= INT_ENP_PHY_INT;
2266 	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
2267 
2268 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2269 	buf |= MAC_TX_TXEN_;
2270 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
2271 
2272 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2273 	buf |= FCT_TX_CTL_EN_;
2274 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2275 
2276 	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2277 
2278 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2279 	buf |= MAC_RX_RXEN_;
2280 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2281 
2282 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2283 	buf |= FCT_RX_CTL_EN_;
2284 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2285 
2286 	return 0;
2287 }
2288 
2289 static void lan78xx_init_stats(struct lan78xx_net *dev)
2290 {
2291 	u32 *p;
2292 	int i;
2293 
	/* initialize rollover limits for the stats update:
	 * some hardware counters are 20 bits wide and some are 32 bits
	 */
2297 	p = (u32 *)&dev->stats.rollover_max;
2298 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2299 		p[i] = 0xFFFFF;
2300 
2301 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2302 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2303 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2304 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2305 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2306 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2307 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2308 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2309 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2310 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2311 
2312 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2313 }
2314 
2315 static int lan78xx_open(struct net_device *net)
2316 {
2317 	struct lan78xx_net *dev = netdev_priv(net);
2318 	int ret;
2319 
2320 	ret = usb_autopm_get_interface(dev->intf);
2321 	if (ret < 0)
2322 		goto out;
2323 
2324 	ret = lan78xx_reset(dev);
2325 	if (ret < 0)
2326 		goto done;
2327 
2328 	ret = lan78xx_phy_init(dev);
2329 	if (ret < 0)
2330 		goto done;
2331 
2332 	/* for Link Check */
2333 	if (dev->urb_intr) {
2334 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2335 		if (ret < 0) {
2336 			netif_err(dev, ifup, dev->net,
2337 				  "intr submit %d\n", ret);
2338 			goto done;
2339 		}
2340 	}
2341 
2342 	lan78xx_init_stats(dev);
2343 
2344 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2345 
2346 	netif_start_queue(net);
2347 
2348 	dev->link_on = false;
2349 
2350 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2351 done:
2352 	usb_autopm_put_interface(dev->intf);
2353 
2354 out:
2355 	return ret;
2356 }
2357 
2358 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2359 {
2360 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2361 	DECLARE_WAITQUEUE(wait, current);
2362 	int temp;
2363 
2364 	/* ensure there are no more active urbs */
2365 	add_wait_queue(&unlink_wakeup, &wait);
2366 	set_current_state(TASK_UNINTERRUPTIBLE);
2367 	dev->wait = &unlink_wakeup;
2368 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2369 
2370 	/* maybe wait for deletions to finish. */
2371 	while (!skb_queue_empty(&dev->rxq) &&
2372 	       !skb_queue_empty(&dev->txq) &&
2373 	       !skb_queue_empty(&dev->done)) {
2374 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2375 		set_current_state(TASK_UNINTERRUPTIBLE);
2376 		netif_dbg(dev, ifdown, dev->net,
2377 			  "waited for %d urb completions\n", temp);
2378 	}
2379 	set_current_state(TASK_RUNNING);
2380 	dev->wait = NULL;
2381 	remove_wait_queue(&unlink_wakeup, &wait);
2382 }
2383 
2384 static int lan78xx_stop(struct net_device *net)
2385 {
2386 	struct lan78xx_net		*dev = netdev_priv(net);
2387 
2388 	if (timer_pending(&dev->stat_monitor))
2389 		del_timer_sync(&dev->stat_monitor);
2390 
2391 	phy_stop(net->phydev);
2392 	phy_disconnect(net->phydev);
2393 	net->phydev = NULL;
2394 
2395 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2396 	netif_stop_queue(net);
2397 
2398 	netif_info(dev, ifdown, dev->net,
2399 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2400 		   net->stats.rx_packets, net->stats.tx_packets,
2401 		   net->stats.rx_errors, net->stats.tx_errors);
2402 
2403 	lan78xx_terminate_urbs(dev);
2404 
2405 	usb_kill_urb(dev->urb_intr);
2406 
2407 	skb_queue_purge(&dev->rxq_pause);
2408 
	/* deferred work (tasklet, timer, workqueue) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
2413 	dev->flags = 0;
2414 	cancel_delayed_work_sync(&dev->wq);
2415 	tasklet_kill(&dev->bh);
2416 
2417 	usb_autopm_put_interface(dev->intf);
2418 
2419 	return 0;
2420 }
2421 
2422 static int lan78xx_linearize(struct sk_buff *skb)
2423 {
2424 	return skb_linearize(skb);
2425 }
2426 
2427 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2428 				       struct sk_buff *skb, gfp_t flags)
2429 {
2430 	u32 tx_cmd_a, tx_cmd_b;
2431 
2432 	if (skb_headroom(skb) < TX_OVERHEAD) {
2433 		struct sk_buff *skb2;
2434 
2435 		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2436 		dev_kfree_skb_any(skb);
2437 		skb = skb2;
2438 		if (!skb)
2439 			return NULL;
2440 	}
2441 
2442 	if (lan78xx_linearize(skb) < 0)
2443 		return NULL;
2444 
2445 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2446 
2447 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2448 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2449 
2450 	tx_cmd_b = 0;
2451 	if (skb_is_gso(skb)) {
2452 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2453 
2454 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2455 
2456 		tx_cmd_a |= TX_CMD_A_LSO_;
2457 	}
2458 
2459 	if (skb_vlan_tag_present(skb)) {
2460 		tx_cmd_a |= TX_CMD_A_IVTG_;
2461 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2462 	}
2463 
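	/* Prepend the two little-endian command words; after both pushes
	 * the buffer is laid out as [tx_cmd_a][tx_cmd_b][frame data].
	 */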
2464 	skb_push(skb, 4);
2465 	cpu_to_le32s(&tx_cmd_b);
2466 	memcpy(skb->data, &tx_cmd_b, 4);
2467 
2468 	skb_push(skb, 4);
2469 	cpu_to_le32s(&tx_cmd_a);
2470 	memcpy(skb->data, &tx_cmd_a, 4);
2471 
2472 	return skb;
2473 }
2474 
2475 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2476 			       struct sk_buff_head *list, enum skb_state state)
2477 {
2478 	unsigned long flags;
2479 	enum skb_state old_state;
2480 	struct skb_data *entry = (struct skb_data *)skb->cb;
2481 
2482 	spin_lock_irqsave(&list->lock, flags);
2483 	old_state = entry->state;
2484 	entry->state = state;
2485 
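	/* Move the skb to the done list while holding only one queue lock
	 * at a time; interrupts stay disabled throughout, since the flags
	 * saved by the irqsave above are restored only after the second
	 * unlock.
	 */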
2486 	__skb_unlink(skb, list);
2487 	spin_unlock(&list->lock);
2488 	spin_lock(&dev->done.lock);
2489 
2490 	__skb_queue_tail(&dev->done, skb);
2491 	if (skb_queue_len(&dev->done) == 1)
2492 		tasklet_schedule(&dev->bh);
2493 	spin_unlock_irqrestore(&dev->done.lock, flags);
2494 
2495 	return old_state;
2496 }
2497 
2498 static void tx_complete(struct urb *urb)
2499 {
2500 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2501 	struct skb_data *entry = (struct skb_data *)skb->cb;
2502 	struct lan78xx_net *dev = entry->dev;
2503 
2504 	if (urb->status == 0) {
2505 		dev->net->stats.tx_packets += entry->num_of_packet;
2506 		dev->net->stats.tx_bytes += entry->length;
2507 	} else {
2508 		dev->net->stats.tx_errors++;
2509 
2510 		switch (urb->status) {
2511 		case -EPIPE:
2512 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2513 			break;
2514 
2515 		/* software-driven interface shutdown */
2516 		case -ECONNRESET:
2517 		case -ESHUTDOWN:
2518 			break;
2519 
2520 		case -EPROTO:
2521 		case -ETIME:
2522 		case -EILSEQ:
2523 			netif_stop_queue(dev->net);
2524 			break;
2525 		default:
2526 			netif_dbg(dev, tx_err, dev->net,
2527 				  "tx err %d\n", entry->urb->status);
2528 			break;
2529 		}
2530 	}
2531 
2532 	usb_autopm_put_interface_async(dev->intf);
2533 
2534 	defer_bh(dev, skb, &dev->txq, tx_done);
2535 }
2536 
2537 static void lan78xx_queue_skb(struct sk_buff_head *list,
2538 			      struct sk_buff *newsk, enum skb_state state)
2539 {
2540 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2541 
2542 	__skb_queue_tail(list, newsk);
2543 	entry->state = state;
2544 }
2545 
2546 static netdev_tx_t
2547 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2548 {
2549 	struct lan78xx_net *dev = netdev_priv(net);
2550 	struct sk_buff *skb2 = NULL;
2551 
2552 	if (skb) {
2553 		skb_tx_timestamp(skb);
2554 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2555 	}
2556 
2557 	if (skb2) {
2558 		skb_queue_tail(&dev->txq_pend, skb2);
2559 
		/* throttle the TX path at speeds below SuperSpeed USB */
2561 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2562 		    (skb_queue_len(&dev->txq_pend) > 10))
2563 			netif_stop_queue(net);
2564 	} else {
2565 		netif_dbg(dev, tx_err, dev->net,
2566 			  "lan78xx_tx_prep return NULL\n");
2567 		dev->net->stats.tx_errors++;
2568 		dev->net->stats.tx_dropped++;
2569 	}
2570 
2571 	tasklet_schedule(&dev->bh);
2572 
2573 	return NETDEV_TX_OK;
2574 }
2575 
2576 static int
2577 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2578 {
2579 	int tmp;
2580 	struct usb_host_interface *alt = NULL;
2581 	struct usb_host_endpoint *in = NULL, *out = NULL;
2582 	struct usb_host_endpoint *status = NULL;
2583 
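	/* Scan each altsetting for a bulk-IN/bulk-OUT pair; an
	 * interrupt-IN endpoint, if present, is kept for link status.
	 */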
2584 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2585 		unsigned ep;
2586 
2587 		in = NULL;
2588 		out = NULL;
2589 		status = NULL;
2590 		alt = intf->altsetting + tmp;
2591 
2592 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2593 			struct usb_host_endpoint *e;
2594 			int intr = 0;
2595 
2596 			e = alt->endpoint + ep;
2597 			switch (e->desc.bmAttributes) {
2598 			case USB_ENDPOINT_XFER_INT:
2599 				if (!usb_endpoint_dir_in(&e->desc))
2600 					continue;
2601 				intr = 1;
2602 				/* FALLTHROUGH */
2603 			case USB_ENDPOINT_XFER_BULK:
2604 				break;
2605 			default:
2606 				continue;
2607 			}
2608 			if (usb_endpoint_dir_in(&e->desc)) {
2609 				if (!intr && !in)
2610 					in = e;
2611 				else if (intr && !status)
2612 					status = e;
2613 			} else {
2614 				if (!out)
2615 					out = e;
2616 			}
2617 		}
2618 		if (in && out)
2619 			break;
2620 	}
2621 	if (!alt || !in || !out)
2622 		return -EINVAL;
2623 
2624 	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2625 				       in->desc.bEndpointAddress &
2626 				       USB_ENDPOINT_NUMBER_MASK);
2627 	dev->pipe_out = usb_sndbulkpipe(dev->udev,
2628 					out->desc.bEndpointAddress &
2629 					USB_ENDPOINT_NUMBER_MASK);
2630 	dev->ep_intr = status;
2631 
2632 	return 0;
2633 }
2634 
2635 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2636 {
2637 	struct lan78xx_priv *pdata = NULL;
2638 	int ret;
2639 	int i;
2640 
	ret = lan78xx_get_endpoints(dev, intf);
	if (ret < 0)
		return ret;
2642 
2643 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2644 
2645 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2646 	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv\n");
2648 		return -ENOMEM;
2649 	}
2650 
2651 	pdata->dev = dev;
2652 
2653 	spin_lock_init(&pdata->rfe_ctl_lock);
2654 	mutex_init(&pdata->dataport_mutex);
2655 
2656 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2657 
2658 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2659 		pdata->vlan_table[i] = 0;
2660 
2661 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2662 
2663 	dev->net->features = 0;
2664 
2665 	if (DEFAULT_TX_CSUM_ENABLE)
2666 		dev->net->features |= NETIF_F_HW_CSUM;
2667 
2668 	if (DEFAULT_RX_CSUM_ENABLE)
2669 		dev->net->features |= NETIF_F_RXCSUM;
2670 
2671 	if (DEFAULT_TSO_CSUM_ENABLE)
2672 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2673 
2674 	dev->net->hw_features = dev->net->features;
2675 
2676 	/* Init all registers */
2677 	ret = lan78xx_reset(dev);
2678 
2679 	lan78xx_mdio_init(dev);
2680 
2681 	dev->net->flags |= IFF_MULTICAST;
2682 
2683 	pdata->wol = WAKE_MAGIC;
2684 
2685 	return 0;
2686 }
2687 
2688 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2689 {
2690 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2691 
2692 	lan78xx_remove_mdio(dev);
2693 
2694 	if (pdata) {
2695 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2696 		kfree(pdata);
2697 		pdata = NULL;
2698 		dev->data[0] = 0;
2699 	}
2700 }
2701 
2702 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2703 				    struct sk_buff *skb,
2704 				    u32 rx_cmd_a, u32 rx_cmd_b)
2705 {
2706 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2707 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2708 		skb->ip_summed = CHECKSUM_NONE;
2709 	} else {
2710 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2711 		skb->ip_summed = CHECKSUM_COMPLETE;
2712 	}
2713 }
2714 
2715 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2716 {
2717 	int		status;
2718 
2719 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2720 		skb_queue_tail(&dev->rxq_pause, skb);
2721 		return;
2722 	}
2723 
2724 	dev->net->stats.rx_packets++;
2725 	dev->net->stats.rx_bytes += skb->len;
2726 
2727 	skb->protocol = eth_type_trans(skb, dev->net);
2728 
2729 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2730 		  skb->len + sizeof(struct ethhdr), skb->protocol);
2731 	memset(skb->cb, 0, sizeof(struct skb_data));
2732 
2733 	if (skb_defer_rx_timestamp(skb))
2734 		return;
2735 
2736 	status = netif_rx(skb);
2737 	if (status != NET_RX_SUCCESS)
2738 		netif_dbg(dev, rx_err, dev->net,
2739 			  "netif_rx status %d\n", status);
2740 }
2741 
2742 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2743 {
2744 	if (skb->len < dev->net->hard_header_len)
2745 		return 0;
2746 
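	/* One bulk-in buffer may carry several frames, each preceded by
	 * RX_CMD_A/RX_CMD_B (32-bit) and RX_CMD_C (16-bit) words and
	 * padded so the next header starts on a 4-byte boundary.
	 */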
2747 	while (skb->len > 0) {
2748 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
2749 		u16 rx_cmd_c;
2750 		struct sk_buff *skb2;
2751 		unsigned char *packet;
2752 
2753 		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2754 		le32_to_cpus(&rx_cmd_a);
2755 		skb_pull(skb, sizeof(rx_cmd_a));
2756 
2757 		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2758 		le32_to_cpus(&rx_cmd_b);
2759 		skb_pull(skb, sizeof(rx_cmd_b));
2760 
2761 		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2762 		le16_to_cpus(&rx_cmd_c);
2763 		skb_pull(skb, sizeof(rx_cmd_c));
2764 
2765 		packet = skb->data;
2766 
2767 		/* get the packet length */
2768 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2769 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
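		/* e.g. a 60-byte frame: (4 - ((60 + 2) % 4)) % 4 = 2 pad
		 * bytes sit between it and the next RX command word
		 */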
2770 
2771 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2772 			netif_dbg(dev, rx_err, dev->net,
2773 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
2774 		} else {
2775 			/* last frame in this batch */
2776 			if (skb->len == size) {
2777 				lan78xx_rx_csum_offload(dev, skb,
2778 							rx_cmd_a, rx_cmd_b);
2779 
2780 				skb_trim(skb, skb->len - 4); /* remove fcs */
2781 				skb->truesize = size + sizeof(struct sk_buff);
2782 
2783 				return 1;
2784 			}
2785 
2786 			skb2 = skb_clone(skb, GFP_ATOMIC);
2787 			if (unlikely(!skb2)) {
2788 				netdev_warn(dev->net, "Error allocating skb");
2789 				return 0;
2790 			}
2791 
2792 			skb2->len = size;
2793 			skb2->data = packet;
2794 			skb_set_tail_pointer(skb2, size);
2795 
2796 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2797 
2798 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
2799 			skb2->truesize = size + sizeof(struct sk_buff);
2800 
2801 			lan78xx_skb_return(dev, skb2);
2802 		}
2803 
2804 		skb_pull(skb, size);
2805 
2806 		/* padding bytes before the next frame starts */
2807 		if (skb->len)
2808 			skb_pull(skb, align_count);
2809 	}
2810 
2811 	return 1;
2812 }
2813 
2814 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2815 {
2816 	if (!lan78xx_rx(dev, skb)) {
2817 		dev->net->stats.rx_errors++;
2818 		goto done;
2819 	}
2820 
2821 	if (skb->len) {
2822 		lan78xx_skb_return(dev, skb);
2823 		return;
2824 	}
2825 
2826 	netif_dbg(dev, rx_err, dev->net, "drop\n");
2827 	dev->net->stats.rx_errors++;
2828 done:
2829 	skb_queue_tail(&dev->done, skb);
2830 }
2831 
2832 static void rx_complete(struct urb *urb);
2833 
2834 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2835 {
2836 	struct sk_buff *skb;
2837 	struct skb_data *entry;
2838 	unsigned long lockflags;
2839 	size_t size = dev->rx_urb_size;
2840 	int ret = 0;
2841 
2842 	skb = netdev_alloc_skb_ip_align(dev->net, size);
2843 	if (!skb) {
2844 		usb_free_urb(urb);
2845 		return -ENOMEM;
2846 	}
2847 
2848 	entry = (struct skb_data *)skb->cb;
2849 	entry->urb = urb;
2850 	entry->dev = dev;
2851 	entry->length = 0;
2852 
2853 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2854 			  skb->data, size, rx_complete, skb);
2855 
2856 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
2857 
2858 	if (netif_device_present(dev->net) &&
2859 	    netif_running(dev->net) &&
2860 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
2861 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2862 		ret = usb_submit_urb(urb, GFP_ATOMIC);
2863 		switch (ret) {
2864 		case 0:
2865 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2866 			break;
2867 		case -EPIPE:
2868 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2869 			break;
2870 		case -ENODEV:
2871 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
2872 			netif_device_detach(dev->net);
2873 			break;
2874 		case -EHOSTUNREACH:
2875 			ret = -ENOLINK;
2876 			break;
2877 		default:
2878 			netif_dbg(dev, rx_err, dev->net,
2879 				  "rx submit, %d\n", ret);
2880 			tasklet_schedule(&dev->bh);
2881 		}
2882 	} else {
2883 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2884 		ret = -ENOLINK;
2885 	}
2886 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2887 	if (ret) {
2888 		dev_kfree_skb_any(skb);
2889 		usb_free_urb(urb);
2890 	}
2891 	return ret;
2892 }
2893 
2894 static void rx_complete(struct urb *urb)
2895 {
2896 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
2897 	struct skb_data	*entry = (struct skb_data *)skb->cb;
2898 	struct lan78xx_net *dev = entry->dev;
2899 	int urb_status = urb->status;
2900 	enum skb_state state;
2901 
2902 	skb_put(skb, urb->actual_length);
2903 	state = rx_done;
2904 	entry->urb = NULL;
2905 
2906 	switch (urb_status) {
2907 	case 0:
2908 		if (skb->len < dev->net->hard_header_len) {
2909 			state = rx_cleanup;
2910 			dev->net->stats.rx_errors++;
2911 			dev->net->stats.rx_length_errors++;
2912 			netif_dbg(dev, rx_err, dev->net,
2913 				  "rx length %d\n", skb->len);
2914 		}
2915 		usb_mark_last_busy(dev->udev);
2916 		break;
2917 	case -EPIPE:
2918 		dev->net->stats.rx_errors++;
2919 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2920 		/* FALLTHROUGH */
2921 	case -ECONNRESET:				/* async unlink */
2922 	case -ESHUTDOWN:				/* hardware gone */
2923 		netif_dbg(dev, ifdown, dev->net,
2924 			  "rx shutdown, code %d\n", urb_status);
2925 		state = rx_cleanup;
2926 		entry->urb = urb;
2927 		urb = NULL;
2928 		break;
2929 	case -EPROTO:
2930 	case -ETIME:
2931 	case -EILSEQ:
2932 		dev->net->stats.rx_errors++;
2933 		state = rx_cleanup;
2934 		entry->urb = urb;
2935 		urb = NULL;
2936 		break;
2937 
2938 	/* data overrun ... flush fifo? */
2939 	case -EOVERFLOW:
2940 		dev->net->stats.rx_over_errors++;
2941 		/* FALLTHROUGH */
2942 
2943 	default:
2944 		state = rx_cleanup;
2945 		dev->net->stats.rx_errors++;
2946 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
2947 		break;
2948 	}
2949 
2950 	state = defer_bh(dev, skb, &dev->rxq, state);
2951 
2952 	if (urb) {
2953 		if (netif_running(dev->net) &&
2954 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
2955 		    state != unlink_start) {
2956 			rx_submit(dev, urb, GFP_ATOMIC);
2957 			return;
2958 		}
2959 		usb_free_urb(urb);
2960 	}
2961 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
2962 }
2963 
2964 static void lan78xx_tx_bh(struct lan78xx_net *dev)
2965 {
2966 	int length;
2967 	struct urb *urb = NULL;
2968 	struct skb_data *entry;
2969 	unsigned long flags;
2970 	struct sk_buff_head *tqp = &dev->txq_pend;
2971 	struct sk_buff *skb, *skb2;
2972 	int ret;
2973 	int count, pos;
2974 	int skb_totallen, pkt_cnt;
2975 
2976 	skb_totallen = 0;
2977 	pkt_cnt = 0;
2978 	count = 0;
2979 	length = 0;
2980 	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2981 		if (skb_is_gso(skb)) {
2982 			if (pkt_cnt) {
2983 				/* handle previous packets first */
2984 				break;
2985 			}
2986 			count = 1;
2987 			length = skb->len - TX_OVERHEAD;
2988 			skb2 = skb_dequeue(tqp);
2989 			goto gso_skb;
2990 		}
2991 
2992 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
2993 			break;
2994 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2995 		pkt_cnt++;
2996 	}
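	/* each batched packet (with its prepended command words) is padded
	 * to a 4-byte boundary below so the device can parse the commands
	 * back to back in a single bulk-out buffer
	 */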
2997 
2998 	/* copy to a single skb */
2999 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3000 	if (!skb)
3001 		goto drop;
3002 
3003 	skb_put(skb, skb_totallen);
3004 
3005 	for (count = pos = 0; count < pkt_cnt; count++) {
3006 		skb2 = skb_dequeue(tqp);
3007 		if (skb2) {
3008 			length += (skb2->len - TX_OVERHEAD);
3009 			memcpy(skb->data + pos, skb2->data, skb2->len);
3010 			pos += roundup(skb2->len, sizeof(u32));
3011 			dev_kfree_skb(skb2);
3012 		}
3013 	}
3014 
3015 gso_skb:
3016 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3017 	if (!urb)
3018 		goto drop;
3019 
3020 	entry = (struct skb_data *)skb->cb;
3021 	entry->urb = urb;
3022 	entry->dev = dev;
3023 	entry->length = length;
3024 	entry->num_of_packet = count;
3025 
3026 	spin_lock_irqsave(&dev->txq.lock, flags);
3027 	ret = usb_autopm_get_interface_async(dev->intf);
3028 	if (ret < 0) {
3029 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3030 		goto drop;
3031 	}
3032 
3033 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3034 			  skb->data, skb->len, tx_complete, skb);
3035 
3036 	if (length % dev->maxpacket == 0) {
3037 		/* send USB_ZERO_PACKET */
3038 		urb->transfer_flags |= URB_ZERO_PACKET;
3039 	}
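	/* A bulk-out transfer that is an exact multiple of wMaxPacketSize
	 * contains no short packet, so without URB_ZERO_PACKET the device
	 * could not tell where the transfer ends.
	 */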
3040 
3041 #ifdef CONFIG_PM
	/* if this triggers, the device is still asleep */
3043 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3044 		/* transmission will be done in resume */
3045 		usb_anchor_urb(urb, &dev->deferred);
3046 		/* no use to process more packets */
3047 		netif_stop_queue(dev->net);
3048 		usb_put_urb(urb);
3049 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3050 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3051 		return;
3052 	}
3053 #endif
3054 
3055 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3056 	switch (ret) {
3057 	case 0:
3058 		netif_trans_update(dev->net);
3059 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3060 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3061 			netif_stop_queue(dev->net);
3062 		break;
3063 	case -EPIPE:
3064 		netif_stop_queue(dev->net);
3065 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3066 		usb_autopm_put_interface_async(dev->intf);
3067 		break;
3068 	default:
3069 		usb_autopm_put_interface_async(dev->intf);
3070 		netif_dbg(dev, tx_err, dev->net,
3071 			  "tx: submit urb err %d\n", ret);
3072 		break;
3073 	}
3074 
3075 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3076 
3077 	if (ret) {
3078 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3079 drop:
3080 		dev->net->stats.tx_dropped++;
3081 		if (skb)
3082 			dev_kfree_skb_any(skb);
3083 		usb_free_urb(urb);
3084 	} else
3085 		netif_dbg(dev, tx_queued, dev->net,
3086 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3087 }
3088 
3089 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3090 {
3091 	struct urb *urb;
3092 	int i;
3093 
3094 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3095 		for (i = 0; i < 10; i++) {
3096 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3097 				break;
3098 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3099 			if (urb)
3100 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3101 					return;
3102 		}
3103 
3104 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3105 			tasklet_schedule(&dev->bh);
3106 	}
3107 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3108 		netif_wake_queue(dev->net);
3109 }
3110 
3111 static void lan78xx_bh(unsigned long param)
3112 {
3113 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
3114 	struct sk_buff *skb;
3115 	struct skb_data *entry;
3116 
3117 	while ((skb = skb_dequeue(&dev->done))) {
3118 		entry = (struct skb_data *)(skb->cb);
3119 		switch (entry->state) {
3120 		case rx_done:
3121 			entry->state = rx_cleanup;
3122 			rx_process(dev, skb);
3123 			continue;
3124 		case tx_done:
3125 			usb_free_urb(entry->urb);
3126 			dev_kfree_skb(skb);
3127 			continue;
3128 		case rx_cleanup:
3129 			usb_free_urb(entry->urb);
3130 			dev_kfree_skb(skb);
3131 			continue;
3132 		default:
3133 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3134 			return;
3135 		}
3136 	}
3137 
3138 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3139 		/* reset update timer delta */
3140 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3141 			dev->delta = 1;
3142 			mod_timer(&dev->stat_monitor,
3143 				  jiffies + STAT_UPDATE_TIMER);
3144 		}
3145 
3146 		if (!skb_queue_empty(&dev->txq_pend))
3147 			lan78xx_tx_bh(dev);
3148 
3149 		if (!timer_pending(&dev->delay) &&
3150 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3151 			lan78xx_rx_bh(dev);
3152 	}
3153 }
3154 
3155 static void lan78xx_delayedwork(struct work_struct *work)
3156 {
3157 	int status;
3158 	struct lan78xx_net *dev;
3159 
3160 	dev = container_of(work, struct lan78xx_net, wq.work);
3161 
3162 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3163 		unlink_urbs(dev, &dev->txq);
3164 		status = usb_autopm_get_interface(dev->intf);
3165 		if (status < 0)
3166 			goto fail_pipe;
3167 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3168 		usb_autopm_put_interface(dev->intf);
3169 		if (status < 0 &&
3170 		    status != -EPIPE &&
3171 		    status != -ESHUTDOWN) {
3172 			if (netif_msg_tx_err(dev))
3173 fail_pipe:
3174 				netdev_err(dev->net,
3175 					   "can't clear tx halt, status %d\n",
3176 					   status);
3177 		} else {
3178 			clear_bit(EVENT_TX_HALT, &dev->flags);
3179 			if (status != -ESHUTDOWN)
3180 				netif_wake_queue(dev->net);
3181 		}
3182 	}
3183 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3184 		unlink_urbs(dev, &dev->rxq);
3185 		status = usb_autopm_get_interface(dev->intf);
3186 		if (status < 0)
			goto fail_halt;
3188 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3189 		usb_autopm_put_interface(dev->intf);
3190 		if (status < 0 &&
3191 		    status != -EPIPE &&
3192 		    status != -ESHUTDOWN) {
3193 			if (netif_msg_rx_err(dev))
3194 fail_halt:
3195 				netdev_err(dev->net,
3196 					   "can't clear rx halt, status %d\n",
3197 					   status);
3198 		} else {
3199 			clear_bit(EVENT_RX_HALT, &dev->flags);
3200 			tasklet_schedule(&dev->bh);
3201 		}
3202 	}
3203 
3204 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3205 		int ret = 0;
3206 
3207 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3208 		status = usb_autopm_get_interface(dev->intf);
3209 		if (status < 0)
3210 			goto skip_reset;
		ret = lan78xx_link_reset(dev);
		if (ret < 0) {
3212 			usb_autopm_put_interface(dev->intf);
3213 skip_reset:
3214 			netdev_info(dev->net, "link reset failed (%d)\n",
3215 				    ret);
3216 		} else {
3217 			usb_autopm_put_interface(dev->intf);
3218 		}
3219 	}
3220 
3221 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3222 		lan78xx_update_stats(dev);
3223 
3224 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3225 
3226 		mod_timer(&dev->stat_monitor,
3227 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3228 
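		/* back off: double the polling interval on each idle
		 * update, capped at 50 * STAT_UPDATE_TIMER
		 */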
3229 		dev->delta = min((dev->delta * 2), 50);
3230 	}
3231 }
3232 
3233 static void intr_complete(struct urb *urb)
3234 {
3235 	struct lan78xx_net *dev = urb->context;
3236 	int status = urb->status;
3237 
3238 	switch (status) {
3239 	/* success */
3240 	case 0:
3241 		lan78xx_status(dev, urb);
3242 		break;
3243 
3244 	/* software-driven interface shutdown */
3245 	case -ENOENT:			/* urb killed */
3246 	case -ESHUTDOWN:		/* hardware gone */
3247 		netif_dbg(dev, ifdown, dev->net,
3248 			  "intr shutdown, code %d\n", status);
3249 		return;
3250 
3251 	/* NOTE:  not throttling like RX/TX, since this endpoint
3252 	 * already polls infrequently
3253 	 */
3254 	default:
3255 		netdev_dbg(dev->net, "intr status %d\n", status);
3256 		break;
3257 	}
3258 
3259 	if (!netif_running(dev->net))
3260 		return;
3261 
3262 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3263 	status = usb_submit_urb(urb, GFP_ATOMIC);
3264 	if (status != 0)
3265 		netif_err(dev, timer, dev->net,
3266 			  "intr resubmit --> %d\n", status);
3267 }
3268 
3269 static void lan78xx_disconnect(struct usb_interface *intf)
3270 {
3271 	struct lan78xx_net		*dev;
3272 	struct usb_device		*udev;
3273 	struct net_device		*net;
3274 
3275 	dev = usb_get_intfdata(intf);
3276 	usb_set_intfdata(intf, NULL);
3277 	if (!dev)
3278 		return;
3279 
3280 	udev = interface_to_usbdev(intf);
3281 
3282 	net = dev->net;
3283 	unregister_netdev(net);
3284 
3285 	cancel_delayed_work_sync(&dev->wq);
3286 
3287 	usb_scuttle_anchored_urbs(&dev->deferred);
3288 
3289 	lan78xx_unbind(dev, intf);
3290 
3291 	usb_kill_urb(dev->urb_intr);
3292 	usb_free_urb(dev->urb_intr);
3293 
3294 	free_netdev(net);
3295 	usb_put_dev(udev);
3296 }
3297 
3298 static void lan78xx_tx_timeout(struct net_device *net)
3299 {
3300 	struct lan78xx_net *dev = netdev_priv(net);
3301 
3302 	unlink_urbs(dev, &dev->txq);
3303 	tasklet_schedule(&dev->bh);
3304 }
3305 
3306 static const struct net_device_ops lan78xx_netdev_ops = {
3307 	.ndo_open		= lan78xx_open,
3308 	.ndo_stop		= lan78xx_stop,
3309 	.ndo_start_xmit		= lan78xx_start_xmit,
3310 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3311 	.ndo_change_mtu		= lan78xx_change_mtu,
3312 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3313 	.ndo_validate_addr	= eth_validate_addr,
3314 	.ndo_do_ioctl		= lan78xx_ioctl,
3315 	.ndo_set_rx_mode	= lan78xx_set_multicast,
3316 	.ndo_set_features	= lan78xx_set_features,
3317 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
3318 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
3319 };
3320 
3321 static void lan78xx_stat_monitor(unsigned long param)
3322 {
3323 	struct lan78xx_net *dev;
3324 
3325 	dev = (struct lan78xx_net *)param;
3326 
3327 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3328 }
3329 
3330 static int lan78xx_probe(struct usb_interface *intf,
3331 			 const struct usb_device_id *id)
3332 {
3333 	struct lan78xx_net *dev;
3334 	struct net_device *netdev;
3335 	struct usb_device *udev;
3336 	int ret;
3337 	unsigned maxp;
3338 	unsigned period;
3339 	u8 *buf = NULL;
3340 
3341 	udev = interface_to_usbdev(intf);
3342 	udev = usb_get_dev(udev);
3343 
3344 	ret = -ENOMEM;
3345 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3346 	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		goto out1;
3349 	}
3350 
3351 	/* netdev_printk() needs this */
3352 	SET_NETDEV_DEV(netdev, &intf->dev);
3353 
3354 	dev = netdev_priv(netdev);
3355 	dev->udev = udev;
3356 	dev->intf = intf;
3357 	dev->net = netdev;
3358 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3359 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3360 
3361 	skb_queue_head_init(&dev->rxq);
3362 	skb_queue_head_init(&dev->txq);
3363 	skb_queue_head_init(&dev->done);
3364 	skb_queue_head_init(&dev->rxq_pause);
3365 	skb_queue_head_init(&dev->txq_pend);
3366 	mutex_init(&dev->phy_mutex);
3367 
3368 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3369 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3370 	init_usb_anchor(&dev->deferred);
3371 
3372 	netdev->netdev_ops = &lan78xx_netdev_ops;
3373 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3374 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3375 
3376 	dev->stat_monitor.function = lan78xx_stat_monitor;
3377 	dev->stat_monitor.data = (unsigned long)dev;
3378 	dev->delta = 1;
3379 	init_timer(&dev->stat_monitor);
3380 
3381 	mutex_init(&dev->stats.access_lock);
3382 
3383 	ret = lan78xx_bind(dev, intf);
3384 	if (ret < 0)
3385 		goto out2;
3386 	strcpy(netdev->name, "eth%d");
3387 
3388 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3389 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3390 
3391 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3392 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3393 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3394 
3395 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3396 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3397 
3398 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3399 					dev->ep_intr->desc.bEndpointAddress &
3400 					USB_ENDPOINT_NUMBER_MASK);
3401 	period = dev->ep_intr->desc.bInterval;
3402 
3403 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3404 	buf = kmalloc(maxp, GFP_KERNEL);
3405 	if (buf) {
3406 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3407 		if (!dev->urb_intr) {
3408 			kfree(buf);
3409 			goto out3;
3410 		} else {
3411 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3412 					 dev->pipe_intr, buf, maxp,
3413 					 intr_complete, dev, period);
3414 		}
3415 	}
3416 
3417 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3418 
3419 	/* driver requires remote-wakeup capability during autosuspend. */
3420 	intf->needs_remote_wakeup = 1;
3421 
3422 	ret = register_netdev(netdev);
3423 	if (ret != 0) {
3424 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3425 		goto out2;
3426 	}
3427 
3428 	usb_set_intfdata(intf, dev);
3429 
3430 	ret = device_set_wakeup_enable(&udev->dev, true);
3431 
	/* The default autosuspend delay of 2 s has more overhead than
	 * advantage; use 10 s instead.
	 */
3435 	pm_runtime_set_autosuspend_delay(&udev->dev,
3436 					 DEFAULT_AUTOSUSPEND_DELAY);
3437 
3438 	return 0;
3439 
3440 out3:
3441 	lan78xx_unbind(dev, intf);
3442 out2:
3443 	free_netdev(netdev);
3444 out1:
3445 	usb_put_dev(udev);
3446 
3447 	return ret;
3448 }
3449 
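/* Bit-serial CRC-16 (polynomial 0x8005, initial value 0xFFFF, LSB of
 * each byte first), presumably matching the CRC the wakeup-frame
 * filter hardware computes over the masked bytes of incoming frames.
 */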
3450 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3451 {
3452 	const u16 crc16poly = 0x8005;
3453 	int i;
3454 	u16 bit, crc, msb;
3455 	u8 data;
3456 
3457 	crc = 0xFFFF;
3458 	for (i = 0; i < len; i++) {
3459 		data = *buf++;
3460 		for (bit = 0; bit < 8; bit++) {
3461 			msb = crc >> 15;
3462 			crc <<= 1;
3463 
3464 			if (msb ^ (u16)(data & 1)) {
3465 				crc ^= crc16poly;
3466 				crc |= (u16)0x0001U;
3467 			}
3468 			data >>= 1;
3469 		}
3470 	}
3471 
3472 	return crc;
3473 }
3474 
3475 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3476 {
3477 	u32 buf;
3478 	int ret;
3479 	int mask_index;
3480 	u16 crc;
3481 	u32 temp_wucsr;
3482 	u32 temp_pmt_ctl;
3483 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[2] = { 0x33, 0x33 };
3485 	const u8 arp_type[2] = { 0x08, 0x06 };
3486 
3487 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3488 	buf &= ~MAC_TX_TXEN_;
3489 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3490 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3491 	buf &= ~MAC_RX_RXEN_;
3492 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3493 
3494 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3495 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3496 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3497 
3498 	temp_wucsr = 0;
3499 
3500 	temp_pmt_ctl = 0;
3501 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3502 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3503 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3504 
3505 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3506 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3507 
3508 	mask_index = 0;
3509 	if (wol & WAKE_PHY) {
3510 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3511 
3512 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3513 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3514 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3515 	}
3516 	if (wol & WAKE_MAGIC) {
3517 		temp_wucsr |= WUCSR_MPEN_;
3518 
3519 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3520 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3521 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3522 	}
3523 	if (wol & WAKE_BCAST) {
3524 		temp_wucsr |= WUCSR_BCST_EN_;
3525 
3526 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3527 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3528 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3529 	}
3530 	if (wol & WAKE_MCAST) {
3531 		temp_wucsr |= WUCSR_WAKE_EN_;
3532 
3533 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3534 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3535 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3536 					WUF_CFGX_EN_ |
3537 					WUF_CFGX_TYPE_MCAST_ |
3538 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3539 					(crc & WUF_CFGX_CRC16_MASK_));
3540 
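		/* mask bits 0-2 select the first three bytes of the frame,
		 * i.e. the 01:00:5e destination prefix the CRC above was
		 * computed over
		 */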
3541 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3542 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3543 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3544 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3545 		mask_index++;
3546 
3547 		/* for IPv6 Multicast */
3548 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3549 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3550 					WUF_CFGX_EN_ |
3551 					WUF_CFGX_TYPE_MCAST_ |
3552 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3553 					(crc & WUF_CFGX_CRC16_MASK_));
3554 
3555 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3556 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3557 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3558 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3559 		mask_index++;
3560 
3561 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3562 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3563 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3564 	}
3565 	if (wol & WAKE_UCAST) {
3566 		temp_wucsr |= WUCSR_PFDA_EN_;
3567 
3568 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3569 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3570 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3571 	}
3572 	if (wol & WAKE_ARP) {
3573 		temp_wucsr |= WUCSR_WAKE_EN_;
3574 
3575 		/* set WUF_CFG & WUF_MASK
3576 		 * for packettype (offset 12,13) = ARP (0x0806)
3577 		 */
3578 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3579 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3580 					WUF_CFGX_EN_ |
3581 					WUF_CFGX_TYPE_ALL_ |
3582 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3583 					(crc & WUF_CFGX_CRC16_MASK_));
3584 
3585 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3586 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3587 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3588 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3589 		mask_index++;
3590 
3591 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3592 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3593 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3594 	}
3595 
3596 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3597 
3598 	/* when multiple WOL bits are set */
3599 	if (hweight_long((unsigned long)wol) > 1) {
3600 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3601 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3602 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3603 	}
3604 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3605 
3606 	/* clear WUPS */
3607 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3608 	buf |= PMT_CTL_WUPS_MASK_;
3609 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3610 
3611 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3612 	buf |= MAC_RX_RXEN_;
3613 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3614 
3615 	return 0;
3616 }
3617 
3618 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3619 {
3620 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3621 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3622 	u32 buf;
3623 	int ret;
3624 	int event;
3625 
3626 	event = message.event;
3627 
3628 	if (!dev->suspend_count++) {
3629 		spin_lock_irq(&dev->txq.lock);
3630 		/* don't autosuspend while transmitting */
3631 		if ((skb_queue_len(&dev->txq) ||
3632 		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
3634 			spin_unlock_irq(&dev->txq.lock);
3635 			ret = -EBUSY;
3636 			goto out;
3637 		} else {
3638 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3639 			spin_unlock_irq(&dev->txq.lock);
3640 		}
3641 
3642 		/* stop TX & RX */
3643 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3644 		buf &= ~MAC_TX_TXEN_;
3645 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
3646 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3647 		buf &= ~MAC_RX_RXEN_;
3648 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
3649 
		/* empty out the rx and tx queues */
3651 		netif_device_detach(dev->net);
3652 		lan78xx_terminate_urbs(dev);
3653 		usb_kill_urb(dev->urb_intr);
3654 
3655 		/* reattach */
3656 		netif_device_attach(dev->net);
3657 	}
3658 
3659 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3660 		del_timer(&dev->stat_monitor);
3661 
3662 		if (PMSG_IS_AUTO(message)) {
3663 			/* auto suspend (selective suspend) */
3664 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3665 			buf &= ~MAC_TX_TXEN_;
3666 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
3667 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3668 			buf &= ~MAC_RX_RXEN_;
3669 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3670 
3671 			ret = lan78xx_write_reg(dev, WUCSR, 0);
3672 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
3673 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3674 
3675 			/* set goodframe wakeup */
3676 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
3677 
3678 			buf |= WUCSR_RFE_WAKE_EN_;
3679 			buf |= WUCSR_STORE_WAKE_;
3680 
3681 			ret = lan78xx_write_reg(dev, WUCSR, buf);
3682 
3683 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3684 
3685 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3686 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
3687 
3688 			buf |= PMT_CTL_PHY_WAKE_EN_;
3689 			buf |= PMT_CTL_WOL_EN_;
3690 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
3691 			buf |= PMT_CTL_SUS_MODE_3_;
3692 
3693 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3694 
3695 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3696 
3697 			buf |= PMT_CTL_WUPS_MASK_;
3698 
3699 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3700 
3701 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3702 			buf |= MAC_RX_RXEN_;
3703 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3704 		} else {
3705 			lan78xx_set_suspend(dev, pdata->wol);
3706 		}
3707 	}
3708 
3709 	ret = 0;
3710 out:
3711 	return ret;
3712 }
3713 
3714 static int lan78xx_resume(struct usb_interface *intf)
3715 {
3716 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3717 	struct sk_buff *skb;
3718 	struct urb *res;
3719 	int ret;
3720 	u32 buf;
3721 
3722 	if (!timer_pending(&dev->stat_monitor)) {
3723 		dev->delta = 1;
3724 		mod_timer(&dev->stat_monitor,
3725 			  jiffies + STAT_UPDATE_TIMER);
3726 	}
3727 
3728 	if (!--dev->suspend_count) {
3729 		/* resume interrupt URBs */
3730 		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);
3732 
3733 		spin_lock_irq(&dev->txq.lock);
3734 		while ((res = usb_get_from_anchor(&dev->deferred))) {
3735 			skb = (struct sk_buff *)res->context;
3736 			ret = usb_submit_urb(res, GFP_ATOMIC);
3737 			if (ret < 0) {
3738 				dev_kfree_skb_any(skb);
3739 				usb_free_urb(res);
3740 				usb_autopm_put_interface_async(dev->intf);
3741 			} else {
3742 				netif_trans_update(dev->net);
3743 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
3744 			}
3745 		}
3746 
3747 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3748 		spin_unlock_irq(&dev->txq.lock);
3749 
3750 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3751 			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3752 				netif_start_queue(dev->net);
3753 			tasklet_schedule(&dev->bh);
3754 		}
3755 	}
3756 
3757 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3758 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3759 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3760 
3761 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3762 					     WUCSR2_ARP_RCD_ |
3763 					     WUCSR2_IPV6_TCPSYN_RCD_ |
3764 					     WUCSR2_IPV4_TCPSYN_RCD_);
3765 
3766 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3767 					    WUCSR_EEE_RX_WAKE_ |
3768 					    WUCSR_PFDA_FR_ |
3769 					    WUCSR_RFE_WAKE_FR_ |
3770 					    WUCSR_WUFR_ |
3771 					    WUCSR_MPR_ |
3772 					    WUCSR_BCST_FR_);
3773 
3774 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3775 	buf |= MAC_TX_TXEN_;
3776 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3777 
3778 	return 0;
3779 }
3780 
3781 static int lan78xx_reset_resume(struct usb_interface *intf)
3782 {
3783 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3784 
3785 	lan78xx_reset(dev);
3786 
3787 	lan78xx_phy_init(dev);
3788 
3789 	return lan78xx_resume(intf);
3790 }
3791 
3792 static const struct usb_device_id products[] = {
3793 	{
3794 	/* LAN7800 USB Gigabit Ethernet Device */
3795 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3796 	},
3797 	{
3798 	/* LAN7850 USB Gigabit Ethernet Device */
3799 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3800 	},
3801 	{},
3802 };
3803 MODULE_DEVICE_TABLE(usb, products);
3804 
3805 static struct usb_driver lan78xx_driver = {
3806 	.name			= DRIVER_NAME,
3807 	.id_table		= products,
3808 	.probe			= lan78xx_probe,
3809 	.disconnect		= lan78xx_disconnect,
3810 	.suspend		= lan78xx_suspend,
3811 	.resume			= lan78xx_resume,
3812 	.reset_resume		= lan78xx_reset_resume,
3813 	.supports_autosuspend	= 1,
3814 	.disable_hub_initiated_lpm = 1,
3815 };
3816 
3817 module_usb_driver(lan78xx_driver);
3818 
3819 MODULE_AUTHOR(DRIVER_AUTHOR);
3820 MODULE_DESCRIPTION(DRIVER_DESC);
3821 MODULE_LICENSE("GPL");
3822