xref: /linux/drivers/net/usb/lan78xx.c (revision 18a7e218cfcdca6666e1f7356533e4c988780b57)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/phylink.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <net/selftests.h>
24 #include <net/vxlan.h>
25 #include <linux/interrupt.h>
26 #include <linux/irqdomain.h>
27 #include <linux/irq.h>
28 #include <linux/irqchip/chained_irq.h>
29 #include <linux/microchipphy.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include "lan78xx.h"
33 
34 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
35 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
36 #define DRIVER_NAME	"lan78xx"
37 
38 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
39 #define THROTTLE_JIFFIES		(HZ / 8)
40 #define UNLINK_TIMEOUT_MS		3
41 
42 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
43 
44 #define SS_USB_PKT_SIZE			(1024)
45 #define HS_USB_PKT_SIZE			(512)
46 #define FS_USB_PKT_SIZE			(64)
47 
48 #define MAX_RX_FIFO_SIZE		(12 * 1024)
49 #define MAX_TX_FIFO_SIZE		(12 * 1024)
50 
51 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
52 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
53 					 (FLOW_THRESHOLD(off) << 8))
54 
55 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
56 #define FLOW_ON_SS			9216
57 #define FLOW_ON_HS			8704
58 
59 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
60 #define FLOW_OFF_SS			4096
61 #define FLOW_OFF_HS			1024
62 
63 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
64 #define DEFAULT_BULK_IN_DELAY		(0x0800)
65 #define MAX_SINGLE_PACKET_SIZE		(9000)
66 #define DEFAULT_TX_CSUM_ENABLE		(true)
67 #define DEFAULT_RX_CSUM_ENABLE		(true)
68 #define DEFAULT_TSO_CSUM_ENABLE		(true)
69 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
70 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
71 #define TX_ALIGNMENT			(4)
72 #define RXW_PADDING			2
73 
74 #define LAN78XX_USB_VENDOR_ID		(0x0424)
75 #define LAN7800_USB_PRODUCT_ID		(0x7800)
76 #define LAN7850_USB_PRODUCT_ID		(0x7850)
77 #define LAN7801_USB_PRODUCT_ID		(0x7801)
78 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
79 #define LAN78XX_OTP_MAGIC		(0x78F3)
80 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
81 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
82 
83 #define	MII_READ			1
84 #define	MII_WRITE			0
85 
86 #define EEPROM_INDICATOR		(0xA5)
87 #define EEPROM_MAC_OFFSET		(0x01)
88 #define MAX_EEPROM_SIZE			512
89 #define OTP_INDICATOR_1			(0xF3)
90 #define OTP_INDICATOR_2			(0xF7)
91 
92 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
93 					 WAKE_MCAST | WAKE_BCAST | \
94 					 WAKE_ARP | WAKE_MAGIC)
95 
96 #define TX_URB_NUM			10
97 #define TX_SS_URB_NUM			TX_URB_NUM
98 #define TX_HS_URB_NUM			TX_URB_NUM
99 #define TX_FS_URB_NUM			TX_URB_NUM
100 
101 /* A single URB buffer must be large enough to hold a complete jumbo packet
102  */
103 #define TX_SS_URB_SIZE			(32 * 1024)
104 #define TX_HS_URB_SIZE			(16 * 1024)
105 #define TX_FS_URB_SIZE			(10 * 1024)
106 
107 #define RX_SS_URB_NUM			30
108 #define RX_HS_URB_NUM			10
109 #define RX_FS_URB_NUM			10
110 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
111 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
112 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
113 
114 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
115 #define SS_BULK_IN_DELAY		0x2000
116 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
117 #define HS_BULK_IN_DELAY		0x2000
118 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
119 #define FS_BULK_IN_DELAY		0x2000
120 
121 #define TX_CMD_LEN			8
122 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
123 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
124 
125 #define RX_CMD_LEN			10
126 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
127 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
128 
129 /* USB related defines */
130 #define BULK_IN_PIPE			1
131 #define BULK_OUT_PIPE			2
132 
133 /* default autosuspend delay (mSec)*/
134 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
135 
136 /* statistic update interval (mSec) */
137 #define STAT_UPDATE_TIMER		(1 * 1000)
138 
139 /* time to wait for MAC or FCT to stop (jiffies) */
140 #define HW_DISABLE_TIMEOUT		(HZ / 10)
141 
142 /* time to wait between polling MAC or FCT state (ms) */
143 #define HW_DISABLE_DELAY_MS		1
144 
145 /* defines interrupts from interrupt EP */
146 #define MAX_INT_EP			(32)
147 #define INT_EP_INTEP			(31)
148 #define INT_EP_OTP_WR_DONE		(28)
149 #define INT_EP_EEE_TX_LPI_START		(26)
150 #define INT_EP_EEE_TX_LPI_STOP		(25)
151 #define INT_EP_EEE_RX_LPI		(24)
152 #define INT_EP_MAC_RESET_TIMEOUT	(23)
153 #define INT_EP_RDFO			(22)
154 #define INT_EP_TXE			(21)
155 #define INT_EP_USB_STATUS		(20)
156 #define INT_EP_TX_DIS			(19)
157 #define INT_EP_RX_DIS			(18)
158 #define INT_EP_PHY			(17)
159 #define INT_EP_DP			(16)
160 #define INT_EP_MAC_ERR			(15)
161 #define INT_EP_TDFU			(14)
162 #define INT_EP_TDFO			(13)
163 #define INT_EP_UTX			(12)
164 #define INT_EP_GPIO_11			(11)
165 #define INT_EP_GPIO_10			(10)
166 #define INT_EP_GPIO_9			(9)
167 #define INT_EP_GPIO_8			(8)
168 #define INT_EP_GPIO_7			(7)
169 #define INT_EP_GPIO_6			(6)
170 #define INT_EP_GPIO_5			(5)
171 #define INT_EP_GPIO_4			(4)
172 #define INT_EP_GPIO_3			(3)
173 #define INT_EP_GPIO_2			(2)
174 #define INT_EP_GPIO_1			(1)
175 #define INT_EP_GPIO_0			(0)
176 
/* ethtool statistics names. The order here is load-bearing: counters are
 * copied out positionally, so it must match the member order of
 * struct lan78xx_statstage / lan78xx_statstage64 exactly.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
226 
/* Raw 32-bit hardware counters as returned by
 * USB_VENDOR_REQUEST_GET_STATS (see lan78xx_read_stats()). These can
 * roll over; lan78xx_check_stat_rollover() detects wraps and
 * lan78xx_update_stats() accumulates into struct lan78xx_statstage64.
 * Member order must match lan78xx_gstrings and lan78xx_statstage64 —
 * the counters are processed positionally as flat u32 arrays.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
276 
/* 64-bit accumulated totals mirroring struct lan78xx_statstage member
 * for member; rebuilt in lan78xx_update_stats() from the raw snapshot
 * plus per-counter rollover counts. Member order must stay in lockstep
 * with lan78xx_statstage (positional u64 array access).
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
326 
/* Device registers dumped for diagnostics — presumably the ethtool
 * get_regs set; confirm against the regs-dump callback (not visible in
 * this chunk). PHY registers are accounted for separately via
 * PHY_REG_SIZE below.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
348 
349 #define PHY_REG_SIZE (32 * sizeof(u32))
350 
351 struct lan78xx_net;
352 
/* Receive-filter / wake state shared with the set_multicast and
 * set_vlan work items; the locks below serialise access to the cached
 * register and table copies.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* cached RFE control register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN membership bitmap */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;			/* configured Wake-on-LAN options */
};
365 
/* Lifecycle of an skb/URB pair, tracked in skb_data.state */
enum skb_state {
	illegal = 0,	/* not in any known state */
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
375 
/* Per-buffer bookkeeping overlaid on skb->cb */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB backing this buffer (allocated in pool setup) */
	struct lan78xx_net *dev;
	enum skb_state state;	/* see enum skb_state */
	size_t length;		/* data length; reset to 0 while pooled */
	int num_of_packet;	/* presumably frames aggregated in this URB - confirm */
};
383 
384 #define EVENT_TX_HALT			0
385 #define EVENT_RX_HALT			1
386 #define EVENT_RX_MEMORY			2
387 #define EVENT_STS_SPLIT			3
388 #define EVENT_PHY_INT_ACK		4
389 #define EVENT_RX_PAUSED			5
390 #define EVENT_DEV_WAKING		6
391 #define EVENT_DEV_ASLEEP		7
392 #define EVENT_DEV_OPEN			8
393 #define EVENT_STAT_UPDATE		9
394 #define EVENT_DEV_DISCONNECT		10
395 
/* Statistics accumulation state:
 *   saved          - last raw h/w snapshot (for rollover detection)
 *   rollover_count - how many times each 32-bit counter has wrapped
 *   rollover_max   - per-counter maximum value before a wrap
 *   curr_stat      - reconstructed 64-bit totals
 * (see lan78xx_update_stats(): curr = raw + count * (max + 1))
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
403 
/* State for the chained IRQ domain that demultiplexes the device's
 * interrupt-EP status bits (INT_EP_*) to virtual IRQs (e.g. the PHY).
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* mapped virq for INT_EP_PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* cached enable mask for the int EP */
	struct mutex		irq_lock;		/* for irq bus access */
};
412 
/* Per-device driver state */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;

	/* URB pool geometry: counts and per-URB buffer sizes, chosen by
	 * USB speed (see the *_URB_NUM / *_URB_SIZE defines above)
	 */
	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	/* free-buffer pools plus in-flight / completed / pending queues */
	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		mdiobus_mutex; /* for MDIO bus access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];	/* scratch; usage not visible here - confirm */

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;

	struct phylink		*phylink;
	struct phylink_config	phylink_config;
};
470 
471 /* use ethtool to change the level for any given device */
472 static int msg_level = -1;
473 module_param(msg_level, int, 0);
474 MODULE_PARM_DESC(msg_level, "Override default message level");
475 
lan78xx_get_buf(struct sk_buff_head * buf_pool)476 static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
477 {
478 	if (skb_queue_empty(buf_pool))
479 		return NULL;
480 
481 	return skb_dequeue(buf_pool);
482 }
483 
/* Return @buf to @buf_pool, first rewinding it to a pristine empty
 * state so it can be reused as if freshly allocated.
 */
static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	/* reset data/tail pointers back to the start of the buffer */
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}
495 
lan78xx_free_buf_pool(struct sk_buff_head * buf_pool)496 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
497 {
498 	struct skb_data *entry;
499 	struct sk_buff *buf;
500 
501 	while (!skb_queue_empty(buf_pool)) {
502 		buf = skb_dequeue(buf_pool);
503 		if (buf) {
504 			entry = (struct skb_data *)buf->cb;
505 			usb_free_urb(entry->urb);
506 			dev_kfree_skb_any(buf);
507 		}
508 	}
509 }
510 
/* Populate @buf_pool with @n_urbs buffers of @urb_size bytes, each with
 * a pre-allocated URB recorded in its skb_data control block.
 *
 * Returns 0 on success or -ENOMEM; on failure any partially built pool
 * is torn down before returning.
 */
static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		struct skb_data *entry;
		struct sk_buff *skb;
		struct urb *urb;

		skb = alloc_skb(urb_size, GFP_ATOMIC);
		if (!skb)
			goto error;

		/* buffer must be linear before it is handed to USB */
		if (skb_linearize(skb) != 0) {
			dev_kfree_skb_any(skb);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(skb);
			goto error;
		}

		entry = (struct skb_data *)skb->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, skb);
	}

	return 0;

error:
	/* undo whatever part of the pool was already built */
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}
554 
/* Take a free Rx buffer from the pool, or NULL if none are available */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}
559 
/* Return @rx_buf (reset to empty) to the Rx free pool */
static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}
565 
/* Free all Rx pool buffers and their URBs */
static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}
570 
/* Build the Rx free pool sized by n_rx_urbs / rx_urb_size.
 * Returns 0 or -ENOMEM.
 */
static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
576 
/* Take a free Tx buffer from the pool, or NULL if none are available */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}
581 
/* Return @tx_buf (reset to empty) to the Tx free pool */
static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}
587 
/* Free all Tx pool buffers and their URBs */
static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}
592 
/* Build the Tx free pool sized by n_tx_urbs / tx_urb_size.
 * Returns 0 or -ENOMEM.
 */
static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
598 
/* Read the 32-bit device register @index into @*data over the control
 * endpoint. Returns 0 on success or a negative errno; @*data is left
 * untouched on failure.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *val;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	/* usb_control_msg() needs a kmalloc'd (DMA-able) buffer, not a
	 * stack or caller-provided one
	 */
	val = kmalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, val, 4, USB_CTRL_GET_TIMEOUT);
	if (ret < 0) {
		if (net_ratelimit())
			netdev_warn(dev->net,
				    "Failed to read register index 0x%08x. ret = %pe",
				    index, ERR_PTR(ret));
	} else {
		/* registers are little-endian on the wire */
		le32_to_cpus(val);
		*data = *val;
	}

	kfree(val);

	return ret < 0 ? ret : 0;
}
628 
/* Write @data to the 32-bit device register @index over the control
 * endpoint. Returns 0 on success or a negative errno.
 */
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *val;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	/* usb_control_msg() needs a kmalloc'd (DMA-able) buffer */
	val = kmalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	/* registers are little-endian on the wire */
	*val = data;
	cpu_to_le32s(val);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, val, 4, USB_CTRL_SET_TIMEOUT);
	if (ret < 0 && net_ratelimit())
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));

	kfree(val);

	return ret < 0 ? ret : 0;
}
659 
/* Read-modify-write of register @reg: only the bits selected by @mask
 * are replaced with the corresponding bits of @data.
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	u32 val;
	int ret;

	ret = lan78xx_read_reg(dev, reg, &val);
	if (ret < 0)
		return ret;

	val = (val & ~mask) | (data & mask);

	return lan78xx_write_reg(dev, reg, val);
}
675 
/* Fetch the raw hardware statistics block into @data, converting each
 * counter from little-endian.
 *
 * Returns the positive number of bytes transferred on success (callers
 * test for "> 0", see lan78xx_update_stats()) or a negative errno.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	/* bounce buffer: usb_control_msg() needs a kmalloc'd buffer */
	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	/* NOTE(review): uses USB_CTRL_SET_TIMEOUT for an IN transfer -
	 * presumably equivalent to the GET timeout; confirm.
	 */
	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* byte-swap each u32 counter in place, then copy out */
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}
714 
/* If the fresh h/w snapshot of @member is below the last saved value,
 * the 32-bit counter wrapped since the previous read: bump its
 * rollover count so lan78xx_update_stats() can reconstruct the total.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
720 
/* Compare the fresh snapshot @stats against the previously saved one,
 * bump rollover counts for any counter that wrapped, then remember
 * @stats as the new baseline. Caller holds stats.access_lock (taken in
 * lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
774 
/* Refresh the 64-bit statistics totals from a fresh hardware snapshot.
 *
 * The structs are deliberately aliased as flat u32/u64 arrays, so this
 * relies on lan78xx_statstage and lan78xx_statstage64 having identical
 * member order and containing nothing but counters.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	/* wake the device; silently skip the update if that fails */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* read_stats() returns positive bytes transferred on success */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* total = raw snapshot + wraps * (counter range) */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
802 
/* Set the enable bit(s) @hw_enable in @reg. Returns 0 or negative errno. */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
807 
/* Disable a hardware block by clearing @hw_enabled in @reg, then poll
 * until the block reports @hw_disabled or HW_DISABLE_TIMEOUT elapses.
 * If the enable bits were already clear, nothing is written.
 *
 * Returns 0 once stopped, -ETIMEDOUT on timeout, or a negative errno
 * from the register access.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long deadline;
	u32 val;
	int ret;

	ret = lan78xx_read_reg(dev, reg, &val);
	if (ret < 0)
		return ret;

	/* already stopped - nothing to do */
	if (!(val & hw_enabled))
		return 0;

	ret = lan78xx_write_reg(dev, reg, val & ~hw_enabled);
	if (ret < 0)
		return ret;

	deadline = jiffies + HW_DISABLE_TIMEOUT;
	for (;;) {
		ret = lan78xx_read_reg(dev, reg, &val);
		if (ret < 0)
			return ret;

		if (val & hw_disabled)
			return 0;

		msleep(HW_DISABLE_DELAY_MS);
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
	}
}
845 
/* Pulse the flush/reset bit(s) @fifo_flush in @reg. Returns 0 or negative errno. */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
850 
lan78xx_start_tx_path(struct lan78xx_net * dev)851 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
852 {
853 	int ret;
854 
855 	netif_dbg(dev, drv, dev->net, "start tx path");
856 
857 	/* Start the MAC transmitter */
858 
859 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
860 	if (ret < 0)
861 		return ret;
862 
863 	/* Start the Tx FIFO */
864 
865 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
866 	if (ret < 0)
867 		return ret;
868 
869 	return 0;
870 }
871 
lan78xx_stop_tx_path(struct lan78xx_net * dev)872 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
873 {
874 	int ret;
875 
876 	netif_dbg(dev, drv, dev->net, "stop tx path");
877 
878 	/* Stop the Tx FIFO */
879 
880 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
881 	if (ret < 0)
882 		return ret;
883 
884 	/* Stop the MAC transmitter */
885 
886 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
887 	if (ret < 0)
888 		return ret;
889 
890 	return 0;
891 }
892 
/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
900 
lan78xx_start_rx_path(struct lan78xx_net * dev)901 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
902 {
903 	int ret;
904 
905 	netif_dbg(dev, drv, dev->net, "start rx path");
906 
907 	/* Start the Rx FIFO */
908 
909 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
910 	if (ret < 0)
911 		return ret;
912 
913 	/* Start the MAC receiver*/
914 
915 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
916 	if (ret < 0)
917 		return ret;
918 
919 	return 0;
920 }
921 
lan78xx_stop_rx_path(struct lan78xx_net * dev)922 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
923 {
924 	int ret;
925 
926 	netif_dbg(dev, drv, dev->net, "stop rx path");
927 
928 	/* Stop the MAC receiver */
929 
930 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
931 	if (ret < 0)
932 		return ret;
933 
934 	/* Stop the Rx FIFO */
935 
936 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
937 	if (ret < 0)
938 		return ret;
939 
940 	return 0;
941 }
942 
/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
950 
951 /* Loop until the read is completed with timeout called with mdiobus_mutex held */
lan78xx_mdiobus_wait_not_busy(struct lan78xx_net * dev)952 static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
953 {
954 	unsigned long start_time = jiffies;
955 	u32 val;
956 	int ret;
957 
958 	do {
959 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
960 		if (ret < 0)
961 			return ret;
962 
963 		if (!(val & MII_ACC_MII_BUSY_))
964 			return 0;
965 	} while (!time_after(jiffies, start_time + HZ));
966 
967 	return -ETIMEDOUT;
968 }
969 
mii_access(int id,int index,int read)970 static inline u32 mii_access(int id, int index, int read)
971 {
972 	u32 ret;
973 
974 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
975 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
976 	if (read)
977 		ret |= MII_ACC_MII_READ_;
978 	else
979 		ret |= MII_ACC_MII_WRITE_;
980 	ret |= MII_ACC_MII_BUSY_;
981 
982 	return ret;
983 }
984 
/* Wait (up to one second) for an in-flight EEPROM command to finish.
 * Returns 0 on completion, -ETIMEDOUT if the controller is still busy
 * or flagged its own timeout, or a negative errno from the read.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		/* stop polling once idle, or as soon as the controller
		 * reports its own timeout
		 */
		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	/* either still busy (our timeout) or the h/w timeout bit is set */
	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -ETIMEDOUT;
	}

	return 0;
}
1009 
lan78xx_eeprom_confirm_not_busy(struct lan78xx_net * dev)1010 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
1011 {
1012 	unsigned long start_time = jiffies;
1013 	u32 val;
1014 	int ret;
1015 
1016 	do {
1017 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
1018 		if (ret < 0)
1019 			return ret;
1020 
1021 		if (!(val & E2P_CMD_EPC_BUSY_))
1022 			return 0;
1023 
1024 		usleep_range(40, 100);
1025 	} while (!time_after(jiffies, start_time + HZ));
1026 
1027 	netdev_warn(dev->net, "EEPROM is busy");
1028 	return -ETIMEDOUT;
1029 }
1030 
/* Read @length bytes starting at @offset from the external EEPROM into
 * @data, one byte per E2P command.
 *
 * On LAN7800 the EEPROM pins are muxed with the LED function, so the
 * LEDs are disabled for the duration and restored afterwards (also on
 * the timeout paths, via read_raw_eeprom_done).
 *
 * Returns 0 on success, -ETIMEDOUT if the EEPROM controller stalls, or
 * a negative errno on USB failure (in which case no restore is
 * attempted, since register access itself is broken).
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* controller stuck busy: still restore the LED config below */
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* issue a single-byte READ command at the current offset */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		int rc = lan78xx_write_reg(dev, HW_CFG, saved);
		/* If USB fails, there is nothing to do */
		if (rc < 0)
			return rc;
	}
	/* 0 on full success, or the -ETIMEDOUT that sent us here */
	return ret;
}
1091 
/* Read from the EEPROM only when a valid indicator byte is present at
 * offset zero; otherwise return -ENODATA.
 */
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 indicator;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &indicator);
	if (ret < 0)
		return ret;

	if (indicator != EEPROM_INDICATOR)
		return -ENODATA;

	return lan78xx_read_raw_eeprom(dev, offset, length, data);
}
1107 
/* Write @length bytes from @data to the EEPROM, one byte per E2P WRITE
 * command, starting at @offset.  A write/erase-enable (EWEN) command is
 * issued first.
 *
 * On LAN7800 the EEPROM pins are muxed with the LED function, so the
 * LEDs are disabled in HW_CFG for the duration of the access and the
 * saved HW_CFG value is restored before returning.
 *
 * NOTE(review): on the -ETIMEDOUT recovery path the timeout is dropped
 * and 0 (or the HW_CFG restore result) is returned — verify callers do
 * not rely on seeing the timeout.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	/* restore the LED configuration saved above */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}
1183 
/* Read @length bytes from the OTP array into @data, starting at @offset.
 *
 * Powers up the OTP block first if it is in power-down (OTP_PWR_DN),
 * then issues one READ command per byte and busy-waits (with a ~1 s
 * deadline) on OTP_STATUS between commands.
 *
 * Returns 0 on success, -ETIMEDOUT on a stuck power-down/status poll,
 * or a negative USB error code.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split across two registers:
		 * high bits in OTP_ADDR1, low bits in OTP_ADDR2
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		/* GO bit starts the programmed READ command */
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1258 
/* Program @length bytes from @data into the OTP array, starting at
 * @offset.
 *
 * Powers up the OTP block if needed, selects BYTE program mode, then
 * for each byte loads address/data registers, issues a program-verify
 * command and busy-waits (with a ~1 s deadline) on OTP_STATUS.
 *
 * Returns 0 on success, -ETIMEDOUT on a stuck power-down/status poll,
 * or a negative USB error code.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* byte address is split across two registers:
		 * high bits in OTP_ADDR1, low bits in OTP_ADDR2
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		/* program-with-verify, then GO to start the command */
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1337 
/* Read from the OTP image selected by the indicator byte at offset 0:
 * OTP_INDICATOR_2 selects the second image (at +0x100), OTP_INDICATOR_1
 * the first; any other value means no valid image (-EINVAL).
 */
static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
	if (ret)
		return ret;

	if (sig == OTP_INDICATOR_2)
		offset += 0x100;
	else if (sig != OTP_INDICATOR_1)
		return -EINVAL;

	return lan78xx_read_raw_otp(dev, offset, length, data);
}
1357 
lan78xx_dataport_wait_not_busy(struct lan78xx_net * dev)1358 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1359 {
1360 	int i, ret;
1361 
1362 	for (i = 0; i < 100; i++) {
1363 		u32 dp_sel;
1364 
1365 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1366 		if (unlikely(ret < 0))
1367 			return ret;
1368 
1369 		if (dp_sel & DP_SEL_DPRDY_)
1370 			return 0;
1371 
1372 		usleep_range(40, 100);
1373 	}
1374 
1375 	netdev_warn(dev->net, "%s timed out", __func__);
1376 
1377 	return -ETIMEDOUT;
1378 }
1379 
/* Write @length 32-bit words from @buf into the internal RAM selected
 * by @ram_select, starting at @addr.
 *
 * Holds the dataport mutex for the whole burst and takes a USB autopm
 * reference so the device stays resumed.  Returns 0 on success or a
 * negative error code (also logged via netdev_warn).
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	/* select which internal RAM the dataport accesses */
	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		/* each word write must complete before the next one */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1427 
/* Stage a perfect-filter (MAF) entry for @addr at @index in the shadow
 * table; entry 0 is reserved for the device's own address, so only
 * indices 1..NUM_OF_MAF-1 are accepted.  The entry is written to
 * hardware later by the deferred multicast work.
 */
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 lo, hi;

	if (!pdata || index <= 0 || index >= NUM_OF_MAF)
		return;

	/* low word: bytes 0-3 of the MAC, little-end first */
	lo = addr[0] | (addr[1] << 8) | (addr[2] << 16) |
	     ((u32)addr[3] << 24);
	/* high word: bytes 4-5 plus valid/destination-type flags */
	hi = addr[4] | (addr[5] << 8) | MAF_HI_VALID_ | MAF_HI_TYPE_DST_;

	pdata->pfilter_table[index][1] = lo;
	pdata->pfilter_table[index][0] = hi;
}
1445 
/* returns hash bit number for given MAC address
 * (top 9 bits of the Ethernet CRC select one of 512 hash-table bits)
 */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
1451 
lan78xx_deferred_multicast_write(struct work_struct * param)1452 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1453 {
1454 	struct lan78xx_priv *pdata =
1455 			container_of(param, struct lan78xx_priv, set_multicast);
1456 	struct lan78xx_net *dev = pdata->dev;
1457 	int i, ret;
1458 
1459 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1460 		  pdata->rfe_ctl);
1461 
1462 	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
1463 				     DP_SEL_VHF_VLAN_LEN,
1464 				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1465 	if (ret < 0)
1466 		goto multicast_write_done;
1467 
1468 	for (i = 1; i < NUM_OF_MAF; i++) {
1469 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1470 		if (ret < 0)
1471 			goto multicast_write_done;
1472 
1473 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1474 					pdata->pfilter_table[i][1]);
1475 		if (ret < 0)
1476 			goto multicast_write_done;
1477 
1478 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1479 					pdata->pfilter_table[i][0]);
1480 		if (ret < 0)
1481 			goto multicast_write_done;
1482 	}
1483 
1484 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1485 
1486 multicast_write_done:
1487 	if (ret < 0)
1488 		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
1489 	return;
1490 }
1491 
lan78xx_set_multicast(struct net_device * netdev)1492 static void lan78xx_set_multicast(struct net_device *netdev)
1493 {
1494 	struct lan78xx_net *dev = netdev_priv(netdev);
1495 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1496 	unsigned long flags;
1497 	int i;
1498 
1499 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1500 
1501 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1502 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1503 
1504 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1505 		pdata->mchash_table[i] = 0;
1506 
1507 	/* pfilter_table[0] has own HW address */
1508 	for (i = 1; i < NUM_OF_MAF; i++) {
1509 		pdata->pfilter_table[i][0] = 0;
1510 		pdata->pfilter_table[i][1] = 0;
1511 	}
1512 
1513 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1514 
1515 	if (dev->net->flags & IFF_PROMISC) {
1516 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1517 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1518 	} else {
1519 		if (dev->net->flags & IFF_ALLMULTI) {
1520 			netif_dbg(dev, drv, dev->net,
1521 				  "receive all multicast enabled");
1522 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1523 		}
1524 	}
1525 
1526 	if (netdev_mc_count(dev->net)) {
1527 		struct netdev_hw_addr *ha;
1528 		int i;
1529 
1530 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1531 
1532 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1533 
1534 		i = 1;
1535 		netdev_for_each_mc_addr(ha, netdev) {
1536 			/* set first 32 into Perfect Filter */
1537 			if (i < 33) {
1538 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1539 			} else {
1540 				u32 bitnum = lan78xx_hash(ha->addr);
1541 
1542 				pdata->mchash_table[bitnum / 32] |=
1543 							(1 << (bitnum % 32));
1544 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1545 			}
1546 			i++;
1547 		}
1548 	}
1549 
1550 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1551 
1552 	/* defer register writes to a sleepable context */
1553 	schedule_work(&pdata->set_multicast);
1554 }
1555 
1556 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1557 
/* Reset the MAC via MAC_CR_RST_ and wait (up to ~1 s) for the reset
 * bit to self-clear.  The mdiobus mutex is held so no MDIO transaction
 * can overlap the reset.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset never completed, or a
 * negative USB error code.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}
1603 
/**
 * lan78xx_phy_int_ack - Acknowledge PHY interrupt
 * @dev: pointer to the LAN78xx device structure
 *
 * This function acknowledges the PHY interrupt by setting the
 * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
 *
 * Note: the write goes through lan78xx_write_reg(), i.e. a USB
 * register access.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
{
	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
}
1617 
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.	hope the failure is rare.
 */
/* Mark @work pending in dev->flags and kick the driver workqueue so it
 * is handled in process context; logs if the work could not be queued.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1629 
lan78xx_status(struct lan78xx_net * dev,struct urb * urb)1630 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1631 {
1632 	u32 intdata;
1633 
1634 	if (urb->actual_length != 4) {
1635 		netdev_warn(dev->net,
1636 			    "unexpected urb length %d", urb->actual_length);
1637 		return;
1638 	}
1639 
1640 	intdata = get_unaligned_le32(urb->transfer_buffer);
1641 
1642 	if (intdata & INT_ENP_PHY_INT) {
1643 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1644 		lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK);
1645 
1646 		if (dev->domain_data.phyirq > 0)
1647 			generic_handle_irq_safe(dev->domain_data.phyirq);
1648 	} else {
1649 		netdev_warn(dev->net,
1650 			    "unexpected interrupt: 0x%08x\n", intdata);
1651 	}
1652 }
1653 
/* ethtool get_eeprom_len: report the fixed maximum EEPROM size */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1658 
/* ethtool get_eeprom: read raw EEPROM contents into @data under a USB
 * autopm reference; sets ee->magic so userspace can write it back.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1677 
/* ethtool set_eeprom: write EEPROM contents (LAN78XX_EEPROM_MAGIC) or
 * program the full 512-byte OTP image (LAN78XX_OTP_MAGIC, offset 0,
 * valid indicator byte).  Any other magic is rejected with -EINVAL
 * instead of silently reporting success.
 */
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
	else
		/* unrecognized magic/layout: nothing was written, so do
		 * not return the autopm success code to userspace
		 */
		ret = -EINVAL;

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1703 
/* ethtool get_strings: provide stat names or selftest names */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
		break;
	case ETH_SS_TEST:
		net_selftest_get_strings(data);
		break;
	}
}
1712 
lan78xx_get_sset_count(struct net_device * netdev,int sset)1713 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1714 {
1715 	if (sset == ETH_SS_STATS)
1716 		return ARRAY_SIZE(lan78xx_gstrings);
1717 	else if (sset == ETH_SS_TEST)
1718 		return net_selftest_get_count();
1719 	else
1720 		return -EOPNOTSUPP;
1721 }
1722 
/* ethtool get_ethtool_stats: refresh hardware counters, then copy the
 * current snapshot out under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1734 
lan78xx_get_wol(struct net_device * netdev,struct ethtool_wolinfo * wol)1735 static void lan78xx_get_wol(struct net_device *netdev,
1736 			    struct ethtool_wolinfo *wol)
1737 {
1738 	struct lan78xx_net *dev = netdev_priv(netdev);
1739 	int ret;
1740 	u32 buf;
1741 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1742 
1743 	if (usb_autopm_get_interface(dev->intf) < 0)
1744 		return;
1745 
1746 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1747 	if (unlikely(ret < 0)) {
1748 		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
1749 		wol->supported = 0;
1750 		wol->wolopts = 0;
1751 	} else {
1752 		if (buf & USB_CFG_RMT_WKP_) {
1753 			wol->supported = WAKE_ALL;
1754 			wol->wolopts = pdata->wol;
1755 		} else {
1756 			wol->supported = 0;
1757 			wol->wolopts = 0;
1758 		}
1759 	}
1760 
1761 	usb_autopm_put_interface(dev->intf);
1762 }
1763 
/* ethtool set_wol: validate the requested options, remember them in
 * pdata->wol, enable/disable USB wakeup accordingly and pass the
 * options down to the PHY.
 */
static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	/* only the WAKE_* options in WAKE_ALL are supported */
	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	/* any non-zero option set enables USB remote wakeup */
	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1791 
/* ethtool get_eee: delegate to phylink */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_get_eee(dev->phylink, edata);
}
1798 
/* ethtool set_eee: delegate to phylink */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_eee(dev->phylink, edata);
}
1805 
/* ethtool get_drvinfo: report driver name and USB bus path */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1814 
/* ethtool get_msglevel: return the driver's message-enable bitmap */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1821 
/* ethtool set_msglevel: update the driver's message-enable bitmap */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1828 
/* ethtool get_link_ksettings: delegate to phylink */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_get(dev->phylink, cmd);
}
1836 
/* ethtool set_link_ksettings: delegate to phylink */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_set(dev->phylink, cmd);
}
1844 
/* ethtool get_pauseparam: delegate to phylink */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	phylink_ethtool_get_pauseparam(dev->phylink, pause);
}
1852 
/* ethtool set_pauseparam: delegate to phylink */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_pauseparam(dev->phylink, pause);
}
1860 
/* ethtool get_regs_len: size of the register dump produced by
 * lan78xx_get_regs()
 */
static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}
1865 
1866 static void
lan78xx_get_regs(struct net_device * netdev,struct ethtool_regs * regs,void * buf)1867 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1868 		 void *buf)
1869 {
1870 	struct lan78xx_net *dev = netdev_priv(netdev);
1871 	unsigned int data_count = 0;
1872 	u32 *data = buf;
1873 	int i, ret;
1874 
1875 	/* Read Device/MAC registers */
1876 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
1877 		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1878 		if (ret < 0) {
1879 			netdev_warn(dev->net,
1880 				    "failed to read register 0x%08x\n",
1881 				    lan78xx_regs[i]);
1882 			goto clean_data;
1883 		}
1884 
1885 		data_count++;
1886 	}
1887 
1888 	return;
1889 
1890 clean_data:
1891 	memset(data, 0, data_count * sizeof(u32));
1892 }
1893 
/* ethtool operations table; link state, ksettings, pause and EEE are
 * routed through phylink/phylib, the rest are driver-local handlers.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.self_test	= net_selftest,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1919 
/* Establish the device MAC address.
 *
 * Uses the address already programmed in RX_ADDRL/RX_ADDRH if valid;
 * otherwise tries platform/Device Tree, then EEPROM/OTP, and finally
 * falls back to a random address.  The chosen address is written back
 * to the RX address registers, installed in perfect-filter slot 0 and
 * set on the netdev.  Returns 0 or a negative error code.
 */
static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];
	int ret;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
	if (ret < 0)
		return ret;

	/* unpack the two registers into a 6-byte address */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		if (ret < 0)
			return ret;
	}

	/* perfect-filter slot 0 always holds the device's own address */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(dev->net, addr);

	return 0;
}
1986 
/* MDIO read and write wrappers for phylib */
/* Read PHY register @idx of PHY @phy_id over the device's MII
 * interface.  Serialized via the mdiobus mutex, with a USB autopm
 * reference held for the duration.  Returns the 16-bit register value
 * or a negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	/* wait for the transaction to finish before reading MII_DATA */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);
	if (ret < 0)
		goto done;

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
2027 
/* Write @regval to PHY register @idx of PHY @phy_id over the device's
 * MII interface.  Serialized via the mdiobus mutex, with a USB autopm
 * reference held for the duration.  Returns 0 on success or a negative
 * error code.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* data must be loaded before the command is issued */
	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	/* wait for the write transaction to complete */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
2066 
lan78xx_mdio_init(struct lan78xx_net * dev)2067 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2068 {
2069 	struct device_node *node;
2070 	int ret;
2071 
2072 	dev->mdiobus = mdiobus_alloc();
2073 	if (!dev->mdiobus) {
2074 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2075 		return -ENOMEM;
2076 	}
2077 
2078 	dev->mdiobus->priv = (void *)dev;
2079 	dev->mdiobus->read = lan78xx_mdiobus_read;
2080 	dev->mdiobus->write = lan78xx_mdiobus_write;
2081 	dev->mdiobus->name = "lan78xx-mdiobus";
2082 	dev->mdiobus->parent = &dev->udev->dev;
2083 
2084 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2085 		 dev->udev->bus->busnum, dev->udev->devnum);
2086 
2087 	switch (dev->chipid) {
2088 	case ID_REV_CHIP_ID_7800_:
2089 	case ID_REV_CHIP_ID_7850_:
2090 		/* set to internal PHY id */
2091 		dev->mdiobus->phy_mask = ~(1 << 1);
2092 		break;
2093 	case ID_REV_CHIP_ID_7801_:
2094 		/* scan thru PHYAD[2..0] */
2095 		dev->mdiobus->phy_mask = ~(0xFF);
2096 		break;
2097 	}
2098 
2099 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2100 	ret = of_mdiobus_register(dev->mdiobus, node);
2101 	of_node_put(node);
2102 	if (ret) {
2103 		netdev_err(dev->net, "can't register MDIO bus\n");
2104 		goto exit1;
2105 	}
2106 
2107 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2108 	return 0;
2109 exit1:
2110 	mdiobus_free(dev->mdiobus);
2111 	return ret;
2112 }
2113 
/* Tear down the MDIO bus created by lan78xx_mdio_init(): unregister it
 * from the MDIO core, then free the bus structure.
 */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2119 
/* irq_domain .map callback: bind a newly created virtual IRQ to this
 * driver's irqchip and flow handler, and exclude it from autoprobing.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
2131 
/* irq_domain .unmap callback: undo irq_map() by clearing the chip,
 * handler and chip data from the virtual IRQ.
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2137 
/* irq_domain callbacks for the chip's internal interrupt controller */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2142 
lan78xx_irq_mask(struct irq_data * irqd)2143 static void lan78xx_irq_mask(struct irq_data *irqd)
2144 {
2145 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2146 
2147 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2148 }
2149 
lan78xx_irq_unmask(struct irq_data * irqd)2150 static void lan78xx_irq_unmask(struct irq_data *irqd)
2151 {
2152 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2153 
2154 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2155 }
2156 
/* irqchip .irq_bus_lock: taken before irq_mask/irq_unmask run, so the
 * cached enable mask is updated atomically with respect to the USB
 * register write-back done in lan78xx_irq_bus_sync_unlock().
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2163 
/* irqchip .irq_bus_sync_unlock: push the cached interrupt enable mask
 * out to the INT_EP_CTL register, then drop the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		goto irq_bus_sync_unlock;

	/* only touch the hardware if the cached mask actually changed */
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

irq_bus_sync_unlock:
	if (ret < 0)
		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
			   ERR_PTR(ret));

	mutex_unlock(&data->irq_lock);
}
2189 
/* irqchip for the device's internal interrupt sources; mask/unmask only
 * update a cached enable word, which is flushed to hardware in the
 * bus_lock/bus_sync_unlock pair (slow-bus pattern for USB access).
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2197 
lan78xx_setup_irq_domain(struct lan78xx_net * dev)2198 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2199 {
2200 	struct irq_domain *irqdomain;
2201 	unsigned int irqmap = 0;
2202 	u32 buf;
2203 	int ret = 0;
2204 
2205 	mutex_init(&dev->domain_data.irq_lock);
2206 
2207 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2208 	if (ret < 0)
2209 		return ret;
2210 
2211 	dev->domain_data.irqenable = buf;
2212 
2213 	dev->domain_data.irqchip = &lan78xx_irqchip;
2214 	dev->domain_data.irq_handler = handle_simple_irq;
2215 
2216 	irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
2217 					     &chip_domain_ops, &dev->domain_data);
2218 	if (irqdomain) {
2219 		/* create mapping for PHY interrupt */
2220 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2221 		if (!irqmap) {
2222 			irq_domain_remove(irqdomain);
2223 
2224 			irqdomain = NULL;
2225 			ret = -EINVAL;
2226 		}
2227 	} else {
2228 		ret = -EINVAL;
2229 	}
2230 
2231 	dev->domain_data.irqdomain = irqdomain;
2232 	dev->domain_data.phyirq = irqmap;
2233 
2234 	return ret;
2235 }
2236 
lan78xx_remove_irq_domain(struct lan78xx_net * dev)2237 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2238 {
2239 	if (dev->domain_data.phyirq > 0) {
2240 		irq_dispose_mapping(dev->domain_data.phyirq);
2241 
2242 		if (dev->domain_data.irqdomain)
2243 			irq_domain_remove(dev->domain_data.irqdomain);
2244 	}
2245 	dev->domain_data.phyirq = 0;
2246 	dev->domain_data.irqdomain = NULL;
2247 }
2248 
lan78xx_mac_config(struct phylink_config * config,unsigned int mode,const struct phylink_link_state * state)2249 static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode,
2250 			       const struct phylink_link_state *state)
2251 {
2252 	struct net_device *net = to_net_dev(config->dev);
2253 	struct lan78xx_net *dev = netdev_priv(net);
2254 	u32 mac_cr = 0;
2255 	int ret;
2256 
2257 	/* Check if the mode is supported */
2258 	if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) {
2259 		netdev_err(net, "Unsupported negotiation mode: %u\n", mode);
2260 		return;
2261 	}
2262 
2263 	switch (state->interface) {
2264 	case PHY_INTERFACE_MODE_GMII:
2265 		mac_cr |= MAC_CR_GMII_EN_;
2266 		break;
2267 	case PHY_INTERFACE_MODE_RGMII:
2268 	case PHY_INTERFACE_MODE_RGMII_ID:
2269 	case PHY_INTERFACE_MODE_RGMII_TXID:
2270 	case PHY_INTERFACE_MODE_RGMII_RXID:
2271 		break;
2272 	default:
2273 		netdev_warn(net, "Unsupported interface mode: %d\n",
2274 			    state->interface);
2275 		return;
2276 	}
2277 
2278 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr);
2279 	if (ret < 0)
2280 		netdev_err(net, "Failed to config MAC with error %pe\n",
2281 			   ERR_PTR(ret));
2282 }
2283 
/* phylink .mac_link_down callback: stop the queue and both data paths,
 * then reset the MAC. The order (TX stop, RX stop, reset) matters: the
 * paths must be quiesced before the link_up sequence runs again.
 */
static void lan78xx_mac_link_down(struct phylink_config *config,
				  unsigned int mode, phy_interface_t interface)
{
	struct net_device *net = to_net_dev(config->dev);
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_stop_queue(net);

	/* MAC reset will not de-assert TXEN/RXEN, we need to stop them
	 * manually before reset. TX and RX should be disabled before running
	 * link_up sequence.
	 */
	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		goto link_down_fail;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		goto link_down_fail;

	/* MAC reset seems to not affect MAC configuration, no idea if it is
	 * really needed, but it was done in previous driver version. So, leave
	 * it here.
	 */
	ret = lan78xx_mac_reset(dev);
	if (ret < 0)
		goto link_down_fail;

	return;

link_down_fail:
	netdev_err(dev->net, "Failed to set MAC down with error %pe\n",
		   ERR_PTR(ret));
}
2319 
2320 /**
2321  * lan78xx_configure_usb - Configure USB link power settings
2322  * @dev: pointer to the LAN78xx device structure
2323  * @speed: negotiated Ethernet link speed (in Mbps)
2324  *
2325  * This function configures U1/U2 link power management for SuperSpeed
2326  * USB devices based on the current Ethernet link speed. It uses the
2327  * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2328  *
2329  * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2330  *       LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2331  *
2332  * Return: 0 on success or a negative error code on failure.
2333  */
lan78xx_configure_usb(struct lan78xx_net * dev,int speed)2334 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2335 {
2336 	u32 mask, val;
2337 	int ret;
2338 
2339 	/* Only configure USB settings for SuperSpeed devices */
2340 	if (dev->udev->speed != USB_SPEED_SUPER)
2341 		return 0;
2342 
2343 	/* LAN7850 does not support USB 3.x */
2344 	if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2345 		netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2346 		return 0;
2347 	}
2348 
2349 	switch (speed) {
2350 	case SPEED_1000:
2351 		/* Disable U2, enable U1 */
2352 		ret = lan78xx_update_reg(dev, USB_CFG1,
2353 					 USB_CFG1_DEV_U2_INIT_EN_, 0);
2354 		if (ret < 0)
2355 			return ret;
2356 
2357 		return lan78xx_update_reg(dev, USB_CFG1,
2358 					  USB_CFG1_DEV_U1_INIT_EN_,
2359 					  USB_CFG1_DEV_U1_INIT_EN_);
2360 
2361 	case SPEED_100:
2362 	case SPEED_10:
2363 		/* Enable both U1 and U2 */
2364 		mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2365 		val = mask;
2366 		return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2367 
2368 	default:
2369 		netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2370 		return -EINVAL;
2371 	}
2372 }
2373 
2374 /**
2375  * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2376  * @dev: pointer to the LAN78xx device structure
2377  * @tx_pause: enable transmission of pause frames
2378  * @rx_pause: enable reception of pause frames
2379  *
2380  * This function configures the LAN78xx flow control settings by writing
2381  * to the FLOW and FCT_FLOW registers. The pause time is set to the
2382  * maximum allowed value (65535 quanta). FIFO thresholds are selected
2383  * based on USB speed.
2384  *
2385  * The Pause Time field is measured in units of 512-bit times (quanta):
2386  *   - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2387  *   - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2388  *   - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2389  *
2390  * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2391  *   - RXUSED is the number of bytes used in the RX FIFO
2392  *   - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2393  *   - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2394  *   - Both thresholds are encoded in units of 512 bytes (rounded up)
2395  *
2396  * Thresholds differ by USB speed because available USB bandwidth
2397  * affects how fast packets can be drained from the RX FIFO:
2398  *   - USB 3.x (SuperSpeed):
2399  *       FLOW_ON  = 9216 bytes → 18 units
2400  *       FLOW_OFF = 4096 bytes →  8 units
2401  *   - USB 2.0 (High-Speed):
2402  *       FLOW_ON  = 8704 bytes → 17 units
2403  *       FLOW_OFF = 1024 bytes →  2 units
2404  *
2405  * Note: The FCT_FLOW register must be configured before enabling TX pause
2406  *       (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2407  *
2408  * Return: 0 on success or a negative error code on failure.
2409  */
lan78xx_configure_flowcontrol(struct lan78xx_net * dev,bool tx_pause,bool rx_pause)2410 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2411 					 bool tx_pause, bool rx_pause)
2412 {
2413 	/* Use maximum pause time: 65535 quanta (512-bit times) */
2414 	const u32 pause_time_quanta = 65535;
2415 	u32 fct_flow = 0;
2416 	u32 flow = 0;
2417 	int ret;
2418 
2419 	/* Prepare MAC flow control bits */
2420 	if (tx_pause)
2421 		flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2422 
2423 	if (rx_pause)
2424 		flow |= FLOW_CR_RX_FCEN_;
2425 
2426 	/* Select RX FIFO thresholds based on USB speed
2427 	 *
2428 	 * FCT_FLOW layout:
2429 	 *   bits [6:0]   FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2430 	 *   bits [14:8]  FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2431 	 *   thresholds are expressed in units of 512 bytes
2432 	 */
2433 	switch (dev->udev->speed) {
2434 	case USB_SPEED_SUPER:
2435 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2436 		break;
2437 	case USB_SPEED_HIGH:
2438 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2439 		break;
2440 	default:
2441 		netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2442 			    dev->udev->speed);
2443 		return -EINVAL;
2444 	}
2445 
2446 	/* Step 1: Write FIFO thresholds before enabling pause frames */
2447 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2448 	if (ret < 0)
2449 		return ret;
2450 
2451 	/* Step 2: Enable MAC pause functionality */
2452 	return lan78xx_write_reg(dev, FLOW, flow);
2453 }
2454 
/* phylink .mac_link_up callback: program MAC speed/duplex, flow control
 * and USB power settings, then flush the FIFOs and (re)start the TX/RX
 * paths. The sequence order follows mac_link_down(), which left both
 * paths disabled.
 */
static void lan78xx_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *net = to_net_dev(config->dev);
	struct lan78xx_net *dev = netdev_priv(net);
	u32 mac_cr = 0;
	int ret;

	/* translate the negotiated speed into MAC_CR speed bits */
	switch (speed) {
	case SPEED_1000:
		mac_cr |= MAC_CR_SPEED_1000_;
		break;
	case SPEED_100:
		mac_cr |= MAC_CR_SPEED_100_;
		break;
	case SPEED_10:
		mac_cr |= MAC_CR_SPEED_10_;
		break;
	default:
		netdev_err(dev->net, "Unsupported speed %d\n", speed);
		return;
	}

	if (duplex == DUPLEX_FULL)
		mac_cr |= MAC_CR_FULL_DUPLEX_;

	/* make sure TXEN and RXEN are disabled before reconfiguring MAC */
	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ |
				 MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr);
	if (ret < 0)
		goto link_up_fail;

	/* apply the negotiated pause-frame configuration */
	ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause);
	if (ret < 0)
		goto link_up_fail;

	/* adjust USB U1/U2 power states for the new link speed */
	ret = lan78xx_configure_usb(dev, speed);
	if (ret < 0)
		goto link_up_fail;

	lan78xx_rx_urb_submit_all(dev);

	/* drop stale FIFO contents before restarting the data paths */
	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto link_up_fail;

	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto link_up_fail;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto link_up_fail;

	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto link_up_fail;

	netif_start_queue(net);

	return;

link_up_fail:
	netdev_err(dev->net, "Failed to set MAC up with error %pe\n",
		   ERR_PTR(ret));
}
2524 
2525 /**
2526  * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support
2527  * @dev: LAN78xx device
2528  * @enable: true to enable EEE, false to disable
2529  *
2530  * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy
2531  * Efficient Ethernet (EEE) operation. According to current understanding
2532  * of the LAN7800 documentation, this bit can be modified while TX and RX
2533  * are enabled. No explicit requirement was found to disable data paths
2534  * before changing this bit.
2535  *
2536  * Return: 0 on success or a negative error code
2537  */
lan78xx_mac_eee_enable(struct lan78xx_net * dev,bool enable)2538 static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable)
2539 {
2540 	u32 mac_cr = 0;
2541 
2542 	if (enable)
2543 		mac_cr |= MAC_CR_EEE_EN_;
2544 
2545 	return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr);
2546 }
2547 
lan78xx_mac_disable_tx_lpi(struct phylink_config * config)2548 static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config)
2549 {
2550 	struct net_device *net = to_net_dev(config->dev);
2551 	struct lan78xx_net *dev = netdev_priv(net);
2552 
2553 	lan78xx_mac_eee_enable(dev, false);
2554 }
2555 
/* phylink .mac_enable_tx_lpi callback: program the TX LPI request delay,
 * then turn EEE back on in the MAC. @tx_clk_stop is not used by this MAC.
 */
static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				     bool tx_clk_stop)
{
	struct net_device *net = to_net_dev(config->dev);
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	/* Software should only change this field when Energy Efficient
	 * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
	 * EEEEN during probe, and phylink itself guarantees that
	 * mac_disable_tx_lpi() will have been previously called.
	 */
	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer);
	if (ret < 0)
		return ret;

	/* delay is programmed; it is now safe to re-enable EEE */
	return lan78xx_mac_eee_enable(dev, true);
}
2574 
/* phylink MAC operations implemented by this driver */
static const struct phylink_mac_ops lan78xx_phylink_mac_ops = {
	.mac_config = lan78xx_mac_config,
	.mac_link_down = lan78xx_mac_link_down,
	.mac_link_up = lan78xx_mac_link_up,
	.mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi,
	.mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi,
};
2582 
/**
 * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801
 * @dev: LAN78xx device
 *
 * Use fixed link configuration with 1 Gbps full duplex. This is used in special
 * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface
 * to a switch without a visible PHY.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int lan78xx_set_fixed_link(struct lan78xx_net *dev)
{
	static const struct phylink_link_state state = {
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};

	netdev_info(dev->net,
		    "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n");

	return phylink_set_fixed_link(dev->phylink, &state);
}
2605 
/**
 * lan78xx_get_phy() - Probe or register PHY device and set interface mode
 * @dev: LAN78xx device structure
 *
 * This function attempts to find a PHY on the MDIO bus. If no PHY is found
 * and the chip is LAN7801, it returns NULL so the caller can fall back to a
 * fixed link. It also sets dev->interface based on chip ID and detected PHY
 * type.
 *
 * Return: a valid PHY device pointer, NULL (LAN7801 without PHY, fixed-link
 * fallback), or ERR_PTR() on failure.
 */
static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
{
	struct phy_device *phydev;

	/* Attempt to locate a PHY on the MDIO bus */
	phydev = phy_find_first(dev->mdiobus);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		if (phydev) {
			/* External RGMII PHY detected */
			dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
			phydev->is_internal = false;

			if (!phydev->drv)
				netdev_warn(dev->net,
					    "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");

			return phydev;
		}

		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */
		return NULL;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* the internal PHY must be reachable on these chips */
		if (!phydev)
			return ERR_PTR(-ENODEV);

		/* These use internal GMII-connected PHY */
		dev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->is_internal = true;
		return phydev;

	default:
		netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
		return ERR_PTR(-ENODEV);
	}
}
2656 
/**
 * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
 * @dev: LAN78xx device
 *
 * Configure MAC-side registers according to dev->interface, which should be
 * set by lan78xx_get_phy().
 *
 * - For PHY_INTERFACE_MODE_RGMII:
 *   Enable MAC-side TXC delay. This mode seems to be used in a special setup
 *   without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
 *   connected to the KSZ9897 switch, and the link timing is expected to be
 *   hardwired (e.g. via strapping or board layout). No devicetree support is
 *   assumed here.
 *
 * - For PHY_INTERFACE_MODE_RGMII_ID:
 *   Disable MAC-side delay and rely on the PHY driver to provide delay.
 *
 * - For GMII, no MAC-specific config is needed.
 *
 * Unsupported modes only produce a warning; the function still returns 0.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
{
	int ret;

	switch (dev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* Enable MAC-side TX clock delay */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		if (ret < 0)
			return ret;

		/* NOTE(review): 0x3D00 is a vendor DLL-bypass setting whose
		 * bit-level meaning is not documented here — confirm against
		 * the LAN7801 datasheet before changing.
		 */
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		if (ret < 0)
			return ret;

		ret = lan78xx_update_reg(dev, HW_CFG,
					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
		if (ret < 0)
			return ret;

		break;

	case PHY_INTERFACE_MODE_RGMII_ID:
		/* Disable MAC-side TXC delay, PHY provides it */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
		if (ret < 0)
			return ret;

		break;

	case PHY_INTERFACE_MODE_GMII:
		/* No MAC-specific configuration required */
		break;

	default:
		netdev_warn(dev->net, "Unsupported interface mode: %d\n",
			    dev->interface);
		break;
	}

	return 0;
}
2722 
2723 /**
2724  * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2725  * @dev: LAN78xx device
2726  * @phydev: PHY device (must be valid)
2727  *
2728  * Reads "microchip,led-modes" property from the PHY's DT node and enables
2729  * the corresponding number of LEDs by writing to HW_CFG.
2730  *
2731  * This helper preserves the original logic, enabling up to 4 LEDs.
2732  * If the property is not present, this function does nothing.
2733  *
2734  * Return: 0 on success or a negative error code.
2735  */
lan78xx_configure_leds_from_dt(struct lan78xx_net * dev,struct phy_device * phydev)2736 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2737 					  struct phy_device *phydev)
2738 {
2739 	struct device_node *np = phydev->mdio.dev.of_node;
2740 	u32 reg;
2741 	int len, ret;
2742 
2743 	if (!np)
2744 		return 0;
2745 
2746 	len = of_property_count_elems_of_size(np, "microchip,led-modes",
2747 					      sizeof(u32));
2748 	if (len < 0)
2749 		return 0;
2750 
2751 	ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2752 	if (ret < 0)
2753 		return ret;
2754 
2755 	reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2756 		 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2757 
2758 	reg |= (len > 0) * HW_CFG_LED0_EN_ |
2759 	       (len > 1) * HW_CFG_LED1_EN_ |
2760 	       (len > 2) * HW_CFG_LED2_EN_ |
2761 	       (len > 3) * HW_CFG_LED3_EN_;
2762 
2763 	return lan78xx_write_reg(dev, HW_CFG, reg);
2764 }
2765 
lan78xx_phylink_setup(struct lan78xx_net * dev)2766 static int lan78xx_phylink_setup(struct lan78xx_net *dev)
2767 {
2768 	struct phylink_config *pc = &dev->phylink_config;
2769 	struct phylink *phylink;
2770 
2771 	pc->dev = &dev->net->dev;
2772 	pc->type = PHYLINK_NETDEV;
2773 	pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 |
2774 			       MAC_100 | MAC_1000FD;
2775 	pc->mac_managed_pm = true;
2776 	pc->lpi_capabilities = MAC_100FD | MAC_1000FD;
2777 	/*
2778 	 * Default TX LPI (Low Power Idle) request delay count is set to 50us.
2779 	 *
2780 	 * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204.
2781 	 *
2782 	 * Reasoning:
2783 	 * According to the application note in the LAN7800 documentation, a
2784 	 * zero delay may negatively impact the TX data path’s ability to
2785 	 * support Gigabit operation. A value of 50us is recommended as a
2786 	 * reasonable default when the part operates at Gigabit speeds,
2787 	 * balancing stability and power efficiency in EEE mode. This delay can
2788 	 * be increased based on performance testing, as EEE is designed for
2789 	 * scenarios with mostly idle links and occasional bursts of full
2790 	 * bandwidth transmission. The goal is to ensure reliable Gigabit
2791 	 * performance without overly aggressive power optimization during
2792 	 * inactive periods.
2793 	 */
2794 	pc->lpi_timer_default = 50;
2795 	pc->eee_enabled_default = true;
2796 
2797 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2798 		phy_interface_set_rgmii(pc->supported_interfaces);
2799 	else
2800 		__set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces);
2801 
2802 	memcpy(dev->phylink_config.lpi_interfaces,
2803 	       dev->phylink_config.supported_interfaces,
2804 	       sizeof(dev->phylink_config.lpi_interfaces));
2805 
2806 	phylink = phylink_create(pc, dev->net->dev.fwnode,
2807 				 dev->interface, &lan78xx_phylink_mac_ops);
2808 	if (IS_ERR(phylink))
2809 		return PTR_ERR(phylink);
2810 
2811 	dev->phylink = phylink;
2812 
2813 	return 0;
2814 }
2815 
lan78xx_phy_uninit(struct lan78xx_net * dev)2816 static void lan78xx_phy_uninit(struct lan78xx_net *dev)
2817 {
2818 	if (dev->phylink) {
2819 		phylink_disconnect_phy(dev->phylink);
2820 		phylink_destroy(dev->phylink);
2821 		dev->phylink = NULL;
2822 	}
2823 }
2824 
/* Locate the PHY (or fall back to a fixed link on LAN7801), set up
 * phylink, prepare the MAC for the chosen interface mode, and attach the
 * PHY. On any failure after phylink creation, phylink is torn down again.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	struct phy_device *phydev;
	int ret;

	phydev = lan78xx_get_phy(dev);
	/* phydev can be NULL if no PHY is found and the chip is LAN7801,
	 * which will use a fixed link later.
	 * If an error occurs, return the error code immediately.
	 */
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	ret = lan78xx_phylink_setup(dev);
	if (ret < 0)
		return ret;

	/* program MAC-side RGMII/GMII settings for dev->interface */
	ret = lan78xx_mac_prepare_for_phy(dev);
	if (ret < 0)
		goto phylink_uninit;

	/* If no PHY is found, set up a fixed link. It is very specific to
	 * the LAN7801 and is used in special cases like EVB-KSZ9897-1 where
	 * LAN7801 acts as a USB-to-Ethernet interface to a switch without
	 * a visible PHY.
	 */
	if (!phydev) {
		ret = lan78xx_set_fixed_link(dev);
		if (ret < 0)
			goto phylink_uninit;

		/* No PHY found, so set up a fixed link and return early.
		 * No need to configure PHY IRQ or attach to phylink.
		 */
		return 0;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	ret = phylink_connect_phy(dev->phylink, phydev);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s, error %pe\n",
			   dev->mdiobus->id, ERR_PTR(ret));
		goto phylink_uninit;
	}

	/* optional LED setup from the PHY's devicetree node */
	ret = lan78xx_configure_leds_from_dt(dev, phydev);
	if (ret < 0)
		goto phylink_uninit;

	return 0;

phylink_uninit:
	lan78xx_phy_uninit(dev);

	return ret;
}
2887 
/* Program the MAC's maximum RX frame length. @size excludes the 4-byte
 * FCS, which is added here. If the receiver is currently enabled it is
 * stopped around the update and re-enabled afterwards.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	bool rxenabled;
	u32 buf;
	int ret;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	if (ret < 0)
		return ret;

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	/* temporarily disable RX while the max-size field is changed */
	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
		if (ret < 0)
			return ret;
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);
	if (ret < 0)
		return ret;

	/* restore the receiver if it was running before */
	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
		if (ret < 0)
			return ret;
	}

	return 0;
}
2924 
/* Cancel every URB queued on @q that is not already being unlinked.
 * The queue lock is dropped around each usb_unlink_urb() call (it may
 * invoke the completion handler), so the walk restarts from the head
 * after every unlink. Returns the number of unlinks initiated.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2969 
/* ndo_change_mtu: reprogram the MAC's maximum RX frame length for the
 * new MTU. Frame sizes that are an exact multiple of the USB endpoint
 * packet size are rejected to avoid a trailing zero-length packet per
 * transfer.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((max_frame_len % dev->maxpacket) == 0)
		return -EDOM;

	/* wake the device for the register update */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
	if (ret < 0)
		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
			   new_mtu, netdev->mtu, ERR_PTR(ret));
	else
		WRITE_ONCE(netdev->mtu, new_mtu);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
2995 
/* lan78xx_set_mac_addr - set a new MAC address (.ndo_set_mac_address)
 * @netdev: network device
 * @p: struct sockaddr holding the new address
 *
 * Refuses the change while the interface is running, validates the
 * address, then programs both the Rx address registers and perfect
 * filter slot 0 so unicast frames to the new address are accepted.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	/* split the 6-byte address into the little-endian register pair */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	if (ret < 0)
		return ret;

	/* Added to support MAC address changes */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	if (ret < 0)
		return ret;

	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
}
3033 
3034 /* Enable or disable Rx checksum offload engine */
lan78xx_set_features(struct net_device * netdev,netdev_features_t features)3035 static int lan78xx_set_features(struct net_device *netdev,
3036 				netdev_features_t features)
3037 {
3038 	struct lan78xx_net *dev = netdev_priv(netdev);
3039 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3040 	unsigned long flags;
3041 
3042 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3043 
3044 	if (features & NETIF_F_RXCSUM) {
3045 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3046 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3047 	} else {
3048 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3049 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3050 	}
3051 
3052 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
3053 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3054 	else
3055 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3056 
3057 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3058 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3059 	else
3060 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3061 
3062 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3063 
3064 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3065 }
3066 
lan78xx_deferred_vlan_write(struct work_struct * param)3067 static void lan78xx_deferred_vlan_write(struct work_struct *param)
3068 {
3069 	struct lan78xx_priv *pdata =
3070 			container_of(param, struct lan78xx_priv, set_vlan);
3071 	struct lan78xx_net *dev = pdata->dev;
3072 
3073 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
3074 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
3075 }
3076 
/* .ndo_vlan_rx_add_vid - mark a VLAN ID as allowed in the filter table */
static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 word = (vid >> 5) & 0x7F;	/* 32-bit word in vlan_table */
	u16 bit = vid & 0x1F;		/* bit within that word */

	pdata->vlan_table[word] |= 1 << bit;

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
3095 
/* .ndo_vlan_rx_kill_vid - clear a VLAN ID from the filter table */
static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 word = (vid >> 5) & 0x7F;	/* 32-bit word in vlan_table */
	u16 bit = vid & 0x1F;		/* bit within that word */

	pdata->vlan_table[word] &= ~(1 << bit);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
3114 
/* lan78xx_init_ltm - program the USB Latency Tolerance Messaging registers
 * @dev: device context
 *
 * If LTM is enabled in USB_CFG1, the six LTM register values may be
 * overridden by a 24-byte table stored in EEPROM or OTP (looked up via
 * a header at offset 0x3F); otherwise all registers are written as zero.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_init_ltm(struct lan78xx_net *dev)
{
	u32 regs[6] = { 0 };
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (ret < 0)
		goto init_ltm_failed;

	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			/* temp[0] = table length, temp[1] = word offset */
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return ret;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			/* fall back to OTP when no EEPROM is fitted */
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return ret;
			}
		}
	}

	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
	if (ret < 0)
		goto init_ltm_failed;

	return 0;

init_ltm_failed:
	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
	return ret;
}
3179 
lan78xx_urb_config_init(struct lan78xx_net * dev)3180 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3181 {
3182 	int result = 0;
3183 
3184 	switch (dev->udev->speed) {
3185 	case USB_SPEED_SUPER:
3186 		dev->rx_urb_size = RX_SS_URB_SIZE;
3187 		dev->tx_urb_size = TX_SS_URB_SIZE;
3188 		dev->n_rx_urbs = RX_SS_URB_NUM;
3189 		dev->n_tx_urbs = TX_SS_URB_NUM;
3190 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
3191 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3192 		break;
3193 	case USB_SPEED_HIGH:
3194 		dev->rx_urb_size = RX_HS_URB_SIZE;
3195 		dev->tx_urb_size = TX_HS_URB_SIZE;
3196 		dev->n_rx_urbs = RX_HS_URB_NUM;
3197 		dev->n_tx_urbs = TX_HS_URB_NUM;
3198 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
3199 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3200 		break;
3201 	case USB_SPEED_FULL:
3202 		dev->rx_urb_size = RX_FS_URB_SIZE;
3203 		dev->tx_urb_size = TX_FS_URB_SIZE;
3204 		dev->n_rx_urbs = RX_FS_URB_NUM;
3205 		dev->n_tx_urbs = TX_FS_URB_NUM;
3206 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
3207 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3208 		break;
3209 	default:
3210 		netdev_warn(dev->net, "USB bus speed not supported\n");
3211 		result = -EIO;
3212 		break;
3213 	}
3214 
3215 	return result;
3216 }
3217 
/* lan78xx_reset - bring the device into a known, configured state
 * @dev: device context
 *
 * Issues a LiteReset, then reprograms MAC address, USB configuration,
 * LTM, FIFO thresholds, Rx filtering and checksum offload, resets the
 * internal PHY and finally restores the Rx max frame length for the
 * current MTU. The register sequence is order-sensitive.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;

	/* trigger a LiteReset via HW_CFG */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* poll up to ~1s for the self-clearing LRST bit */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	/* the reset cleared the MAC address registers; reprogram them */
	ret = lan78xx_init_mac_address(dev);
	if (ret < 0)
		return ret;

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	ret = lan78xx_init_ltm(dev);
	if (ret < 0)
		return ret;

	/* burst_cap/bulk_in_delay were selected per bus speed at probe */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	/* multiple ethernet frames per URB, plus clock outputs enabled */
	buf |= HW_CFG_MEF_;
	buf |= HW_CFG_CLK125_EN_;
	buf |= HW_CFG_REFCLK25_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* clear any stale interrupt status; start with flow control off */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* always accept broadcast and perfect-DA-match frames */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* poll up to ~1s for PHY reset completion AND device ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* disable MAC auto speed/duplex and EEE; link management is done
	 * elsewhere
	 */
	buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_);

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3395 
lan78xx_init_stats(struct lan78xx_net * dev)3396 static void lan78xx_init_stats(struct lan78xx_net *dev)
3397 {
3398 	u32 *p;
3399 	int i;
3400 
3401 	/* initialize for stats update
3402 	 * some counters are 20bits and some are 32bits
3403 	 */
3404 	p = (u32 *)&dev->stats.rollover_max;
3405 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3406 		p[i] = 0xFFFFF;
3407 
3408 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3409 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3410 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3411 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3412 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3413 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3414 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3415 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3416 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3417 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3418 
3419 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3420 }
3421 
/* lan78xx_open - bring the interface up (.ndo_open)
 * @net: network device
 *
 * Takes a PM reference for the duration of the open interface, enables
 * NAPI, submits the interrupt URB used for link-change notification and
 * starts the link management layer.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	/* held until lan78xx_stop(); keeps the device awake while open */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	lan78xx_init_stats(dev);

	napi_enable(&dev->napi);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	phylink_start(dev->phylink);

done:
	mutex_unlock(&dev->dev_mutex);

	/* drop the PM reference again on any failure path */
	if (ret < 0)
		usb_autopm_put_interface(dev->intf);

	return ret;
}
3461 
/* lan78xx_terminate_urbs - cancel all in-flight Tx/Rx URBs and drain queues
 * @dev: device context
 *
 * Asynchronously unlinks every URB on the Tx and Rx queues, then sleeps
 * in short intervals until their completion handlers have emptied both
 * queues (completions wake us via dev->wait). Finally releases buffers
 * left on the Rx done/overflow and Tx pending queues.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		/* re-arm before re-checking the queues to avoid a missed
		 * wakeup
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3497 
/* lan78xx_stop - take the interface down (.ndo_stop)
 * @net: network device
 *
 * Stops the stat timer and NAPI, cancels all URBs, stops the link layer,
 * neutralises deferred work and releases the PM reference taken in
 * lan78xx_open().
 *
 * Return: always 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		timer_delete_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	phylink_stop(dev->phylink);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	/* balances the usb_autopm_get_interface() in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3540 
/* defer_bh - move a completed SKB to rxq_done and kick NAPI
 * @dev: device context
 * @skb: buffer whose URB just completed
 * @list: queue the SKB currently sits on
 * @state: new state to record in the SKB's skb_data
 *
 * Return: the SKB's previous state, so the caller can detect whether it
 * had already been unlinked.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	/* hand-over-hand locking: interrupts stay disabled from the first
	 * lock through the final unlock while we swap from list->lock to
	 * rxq_done.lock
	 */
	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	/* only schedule NAPI on the empty->non-empty transition; it is
	 * already pending otherwise
	 */
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3564 
tx_complete(struct urb * urb)3565 static void tx_complete(struct urb *urb)
3566 {
3567 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3568 	struct skb_data *entry = (struct skb_data *)skb->cb;
3569 	struct lan78xx_net *dev = entry->dev;
3570 
3571 	if (urb->status == 0) {
3572 		dev->net->stats.tx_packets += entry->num_of_packet;
3573 		dev->net->stats.tx_bytes += entry->length;
3574 	} else {
3575 		dev->net->stats.tx_errors += entry->num_of_packet;
3576 
3577 		switch (urb->status) {
3578 		case -EPIPE:
3579 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3580 			break;
3581 
3582 		/* software-driven interface shutdown */
3583 		case -ECONNRESET:
3584 		case -ESHUTDOWN:
3585 			netif_dbg(dev, tx_err, dev->net,
3586 				  "tx err interface gone %d\n",
3587 				  entry->urb->status);
3588 			break;
3589 
3590 		case -EPROTO:
3591 		case -ETIME:
3592 		case -EILSEQ:
3593 			netif_stop_queue(dev->net);
3594 			netif_dbg(dev, tx_err, dev->net,
3595 				  "tx err queue stopped %d\n",
3596 				  entry->urb->status);
3597 			break;
3598 		default:
3599 			netif_dbg(dev, tx_err, dev->net,
3600 				  "unknown tx err %d\n",
3601 				  entry->urb->status);
3602 			break;
3603 		}
3604 	}
3605 
3606 	usb_autopm_put_interface_async(dev->intf);
3607 
3608 	skb_unlink(skb, &dev->txq);
3609 
3610 	lan78xx_release_tx_buf(dev, skb);
3611 
3612 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3613 	 */
3614 	if (skb_queue_empty(&dev->txq) &&
3615 	    !skb_queue_empty(&dev->txq_pend))
3616 		napi_schedule(&dev->napi);
3617 }
3618 
lan78xx_queue_skb(struct sk_buff_head * list,struct sk_buff * newsk,enum skb_state state)3619 static void lan78xx_queue_skb(struct sk_buff_head *list,
3620 			      struct sk_buff *newsk, enum skb_state state)
3621 {
3622 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3623 
3624 	__skb_queue_tail(list, newsk);
3625 	entry->state = state;
3626 }
3627 
lan78xx_tx_urb_space(struct lan78xx_net * dev)3628 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3629 {
3630 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3631 }
3632 
lan78xx_tx_pend_data_len(struct lan78xx_net * dev)3633 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3634 {
3635 	return dev->tx_pend_data_len;
3636 }
3637 
lan78xx_tx_pend_skb_add(struct lan78xx_net * dev,struct sk_buff * skb,unsigned int * tx_pend_data_len)3638 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3639 				    struct sk_buff *skb,
3640 				    unsigned int *tx_pend_data_len)
3641 {
3642 	unsigned long flags;
3643 
3644 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3645 
3646 	__skb_queue_tail(&dev->txq_pend, skb);
3647 
3648 	dev->tx_pend_data_len += skb->len;
3649 	*tx_pend_data_len = dev->tx_pend_data_len;
3650 
3651 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3652 }
3653 
lan78xx_tx_pend_skb_head_add(struct lan78xx_net * dev,struct sk_buff * skb,unsigned int * tx_pend_data_len)3654 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3655 					 struct sk_buff *skb,
3656 					 unsigned int *tx_pend_data_len)
3657 {
3658 	unsigned long flags;
3659 
3660 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3661 
3662 	__skb_queue_head(&dev->txq_pend, skb);
3663 
3664 	dev->tx_pend_data_len += skb->len;
3665 	*tx_pend_data_len = dev->tx_pend_data_len;
3666 
3667 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3668 }
3669 
lan78xx_tx_pend_skb_get(struct lan78xx_net * dev,struct sk_buff ** skb,unsigned int * tx_pend_data_len)3670 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3671 				    struct sk_buff **skb,
3672 				    unsigned int *tx_pend_data_len)
3673 {
3674 	unsigned long flags;
3675 
3676 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3677 
3678 	*skb = __skb_dequeue(&dev->txq_pend);
3679 	if (*skb)
3680 		dev->tx_pend_data_len -= (*skb)->len;
3681 	*tx_pend_data_len = dev->tx_pend_data_len;
3682 
3683 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3684 }
3685 
/* lan78xx_start_xmit - queue an SKB for transmission (.ndo_start_xmit)
 * @skb: frame to transmit
 * @net: network device
 *
 * Frames are staged on txq_pend; actual URB submission happens from the
 * NAPI poll handler. Always consumes the SKB.
 *
 * Return: NETDEV_TX_OK.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* wake the device via deferred work if it is autosuspended */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3721 
/* lan78xx_bind - allocate driver private state and initialise the device
 * @dev: device context
 * @intf: USB interface being bound
 *
 * Allocates the lan78xx_priv stored in dev->data[0], initialises its
 * locks and deferred work, selects the default netdev feature set,
 * sets up the IRQ domain, resets the chip and brings up MDIO.
 *
 * Return: 0 on success or a negative error code; on failure all
 * resources acquired here are released.
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = NULL;
	int ret;
	int i;

	/* private state is reached through dev->data[0] everywhere else */
	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	/* build the default feature set from compile-time policy */
	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	if (DEFAULT_VLAN_RX_OFFLOAD)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (DEFAULT_VLAN_FILTER_ENABLE)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	/* everything enabled by default is also user-toggleable */
	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
	if (ret < 0) {
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);
		goto out1;
	}

	/* Init all registers */
	ret = lan78xx_reset(dev);
	if (ret) {
		netdev_warn(dev->net, "Registers INIT FAILED....");
		goto out2;
	}

	ret = lan78xx_mdio_init(dev);
	if (ret) {
		netdev_warn(dev->net, "MDIO INIT FAILED.....");
		goto out2;
	}

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	return ret;

out2:
	lan78xx_remove_irq_domain(dev);

out1:
	netdev_warn(dev->net, "Bind routine FAILED");
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	kfree(pdata);
	return ret;
}
3803 
lan78xx_unbind(struct lan78xx_net * dev,struct usb_interface * intf)3804 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3805 {
3806 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3807 
3808 	lan78xx_remove_irq_domain(dev);
3809 
3810 	lan78xx_remove_mdio(dev);
3811 
3812 	if (pdata) {
3813 		cancel_work_sync(&pdata->set_multicast);
3814 		cancel_work_sync(&pdata->set_vlan);
3815 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3816 		kfree(pdata);
3817 		pdata = NULL;
3818 		dev->data[0] = 0;
3819 	}
3820 }
3821 
/* Populate skb checksum state from the Rx descriptor words.
 *
 * HW Checksum offload appears to be flawed if used when not stripping
 * VLAN headers, so fall back to software checksumming in that case, when
 * offload is disabled, or when the descriptor flags a checksum error.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	bool sw_csum;

	sw_csum = !(dev->net->features & NETIF_F_RXCSUM) ||
		  unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
		  ((rx_cmd_a & RX_CMD_A_FVTG_) &&
		   !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX));

	if (sw_csum) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3839 
/* Attach the hardware-stripped VLAN tag from the Rx descriptor when VLAN
 * Rx offload is enabled and the descriptor flags a tagged frame.
 */
static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	if (!(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))
		return;

	if (rx_cmd_a & RX_CMD_A_FVTG_)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       (rx_cmd_b & 0xffff));
}
3849 
lan78xx_skb_return(struct lan78xx_net * dev,struct sk_buff * skb)3850 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3851 {
3852 	dev->net->stats.rx_packets++;
3853 	dev->net->stats.rx_bytes += skb->len;
3854 
3855 	skb->protocol = eth_type_trans(skb, dev->net);
3856 
3857 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3858 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3859 	memset(skb->cb, 0, sizeof(struct skb_data));
3860 
3861 	if (skb_defer_rx_timestamp(skb))
3862 		return;
3863 
3864 	napi_gro_receive(&dev->napi, skb);
3865 }
3866 
/* lan78xx_rx - parse a completed Rx URB buffer into individual frames
 * @dev: device context
 * @skb: URB buffer; may contain several frames, each preceded by the
 *       10-byte command header (rx_cmd_a/b/c) and followed by alignment
 *       padding
 * @budget: NAPI budget
 * @work_done: in/out count of frames delivered this poll cycle
 *
 * Return: 1 if the buffer was parsed (even if individual frames were
 * dropped), 0 if the buffer itself was malformed and should count as an
 * Rx error.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		/* strip the three little-endian command words */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* a length beyond the remaining buffer means the buffer is
		 * corrupt; abandon it entirely
		 */
		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* frame flagged bad by hardware; skip it but keep
			 * parsing the rest of the buffer
			 */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			/* hardware leaves the FCS on the frame; drop it */
			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3952 
rx_process(struct lan78xx_net * dev,struct sk_buff * skb,int budget,int * work_done)3953 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3954 			      int budget, int *work_done)
3955 {
3956 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3957 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3958 		dev->net->stats.rx_errors++;
3959 	}
3960 }
3961 
/* rx_complete - bulk-in URB completion handler
 * @urb: completed URB; urb->context is the Rx buffer SKB
 *
 * Classifies the completion status, updates error statistics and then
 * defers the SKB to rxq_done (state rx_done for valid data, rx_cleanup
 * otherwise) where the NAPI poll handler picks it up.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	skb_put(skb, urb->actual_length);
	state = rx_done;

	/* sanity check: the completing URB should be the one recorded in
	 * the SKB's skb_data
	 */
	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		/* success, but runt buffers are discarded as length errors */
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* endpoint stalled; schedule a clear from process context */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}
4021 
/* rx_submit - submit one Rx buffer as a bulk-in URB
 * @dev: device context
 * @skb: preallocated Rx buffer whose skb_data carries its URB
 * @flags: GFP flags for usb_submit_urb()
 *
 * Submission is refused while the device is absent, down, halted or
 * asleep. On any failure the buffer is returned to the free pool.
 *
 * Return: 0 on success, -ENOLINK when the device cannot accept URBs,
 * or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock serialises the state checks with queue insertion */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled; clear from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* on any failure, give the buffer back to the free pool */
	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
4073 
lan78xx_rx_urb_submit_all(struct lan78xx_net * dev)4074 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4075 {
4076 	struct sk_buff *rx_buf;
4077 
4078 	/* Ensure the maximum number of Rx URBs is submitted
4079 	 */
4080 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4081 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4082 			break;
4083 	}
4084 }
4085 
/* Recycle a processed Rx buffer: rewind the SKB to an empty state and
 * resubmit its URB to the USB core.
 */
static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
				    struct sk_buff *rx_buf)
{
	/* reset SKB data pointers */

	rx_buf->data = rx_buf->head;
	skb_reset_tail_pointer(rx_buf);
	rx_buf->len = 0;
	rx_buf->data_len = 0;

	/* on failure rx_submit returns the buffer to the free pool */
	rx_submit(dev, rx_buf, GFP_ATOMIC);
}
4098 
/* Write the two little-endian Tx command words that precede each frame
 * in the Tx URB buffer. @buffer may be unaligned (TX_CMD_LEN bytes are
 * written).
 */
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
{
	u32 tx_cmd_a;
	u32 tx_cmd_b;

	/* frame length plus request for hardware FCS insertion */
	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	/* offload IP and TCP/UDP checksum insertion to the hardware */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp the MSS to the minimum the hardware accepts */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	/* hardware inserts the VLAN tag carried in tx_cmd_b */
	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	put_unaligned_le32(tx_cmd_a, buffer);
	put_unaligned_le32(tx_cmd_b, buffer + 4);
}
4126 
/* Pack as many pending Tx SKBs as fit into the URB buffer @tx_buf.
 *
 * Each frame is laid out as: padding to TX_ALIGNMENT, the two Tx
 * command words, then the frame data. Consumed SKBs are freed; an SKB
 * that does not fit is pushed back onto the pending queue.
 *
 * Returns the skb_data bookkeeping entry of @tx_buf with num_of_packet
 * (GSO-segment aware) and length filled in.
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* pad so each command-word header starts on a TX_ALIGNMENT
		 * boundary within the URB buffer
		 */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* doesn't fit: return the SKB to the head of the
			 * pending queue and finish this URB
			 */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			/* drop the frame and rewind over the command words
			 * already written for it
			 */
			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		/* count GSO segments so tx_dropped accounting is accurate */
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
4192 
/* Tx bottom half: wake the stack queue if space freed up, then drain
 * the pending Tx queue into bulk-out URBs until there is no more data,
 * no more free URBs, or a submission error occurs.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		/* consume pending SKBs into this URB buffer */
		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			/* NOTE: jumps into the error-accounting tail below */
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			/* count every packet packed into this URB as dropped
			 * and recycle the buffer
			 */
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
4285 
/* Main NAPI bottom half: deliver deferred frames, process completed Rx
 * URBs (resubmitting each one), refill the Rx ring and kick the Tx
 * path. Returns the number of frames delivered (work done).
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* error completion: just recycle the buffer */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4361 
/* NAPI poll callback. Skips all work while the device is suspended;
 * otherwise runs the bottom half and, when the budget was not
 * exhausted, completes NAPI and reschedules itself if more Rx or Tx
 * work is already waiting.
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		/* returning < budget tells NAPI polling may stop */
		result = work_done;
	}

	return result;
}
4405 
/* Deferred-event worker (dev->wq): handles events flagged via
 * lan78xx_defer_kevent() that need process context — clearing Tx/Rx
 * endpoint halts, acknowledging PHY interrupts and updating statistics.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return;

	/* keep the device resumed while touching it over USB */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);

		status = usb_clear_halt(dev->udev, dev->pipe_out);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}

	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			/* NAPI will refill the Rx ring */
			napi_schedule(&dev->napi);
		}
	}

	if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
		ret = lan78xx_phy_int_ack(dev);
		if (ret)
			netdev_info(dev->net, "PHY INT ack failed (%pe)\n",
				    ERR_PTR(ret));
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		/* back off the stats timer exponentially (capped at 50) */
		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}

	usb_autopm_put_interface(dev->intf);
}
4476 
/* Completion handler for the interrupt (status) URB. Processes the
 * status payload on success and resubmits the URB unless the interface
 * is shutting down or the device is gone.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* clear stale status data before the next transfer */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4528 
/* USB disconnect callback: tear down phylink, the netdev, deferred
 * work, URBs and buffer pools in the reverse order of probe.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;

	/* phylink calls require RTNL */
	rtnl_lock();
	phylink_stop(dev->phylink);
	phylink_disconnect_phy(dev->phylink);
	rtnl_unlock();

	netif_napi_del(&dev->napi);

	unregister_netdev(net);

	timer_shutdown_sync(&dev->stat_monitor);
	/* stop the kevent worker from requeueing before cancelling it */
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	phylink_destroy(dev->phylink);

	/* drop Tx URBs parked while the device was autosuspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4571 
/* ndo_tx_timeout: cancel in-flight Tx URBs and let NAPI restart the
 * Tx path.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}
4579 
/* ndo_features_check: strip GSO for frames exceeding the device's TSO
 * limit, then apply the generic VLAN and VXLAN feature restrictions.
 */
static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
						struct net_device *netdev,
						netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	netdev_features_t valid = features;

	if (skb->len > LAN78XX_TSO_SIZE(dev))
		valid &= ~NETIF_F_GSO_MASK;

	valid = vlan_features_check(skb, valid);

	return vxlan_features_check(skb, valid);
}
4594 
/* net_device callbacks for the LAN78xx interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4610 
/* Statistics timer callback: runs in atomic context, so defer the
 * actual USB register reads to the kevent work queue.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4617 
/* USB probe: allocate the netdev and URB/buffer resources, validate the
 * three expected endpoints (bulk-in, bulk-out, interrupt-in), bind to
 * the hardware, set up the interrupt URB, initialise the PHY and
 * register the network device. Error paths unwind in reverse order.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	/* hold a reference for the lifetime of the netdev */
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->mdiobus_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* the driver needs bulk-in, bulk-out and interrupt-in endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out5;
	}

	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_urbs;
	}

	usb_fill_int_urb(dev->urb_intr, dev->udev,
			 dev->pipe_intr, buf, maxp,
			 intr_complete, dev, period);
	/* buf is now owned by the URB and freed with it */
	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto free_urbs;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto free_urbs;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto phy_uninit;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): return value deliberately not propagated here —
	 * wakeup enabling appears best-effort; confirm against upstream
	 * intent before changing.
	 */
	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

phy_uninit:
	lan78xx_phy_uninit(dev);
free_urbs:
	usb_free_urb(dev->urb_intr);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4791 
lan78xx_wakeframe_crc16(const u8 * buf,int len)4792 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4793 {
4794 	const u16 crc16poly = 0x8005;
4795 	int i;
4796 	u16 bit, crc, msb;
4797 	u8 data;
4798 
4799 	crc = 0xFFFF;
4800 	for (i = 0; i < len; i++) {
4801 		data = *buf++;
4802 		for (bit = 0; bit < 8; bit++) {
4803 			msb = crc >> 15;
4804 			crc <<= 1;
4805 
4806 			if (msb ^ (u16)(data & 1)) {
4807 				crc ^= crc16poly;
4808 				crc |= (u16)0x0001U;
4809 			}
4810 			data >>= 1;
4811 		}
4812 	}
4813 
4814 	return crc;
4815 }
4816 
/* Program the device for USB autosuspend (selective suspend): stop
 * both data paths, clear wake-up state, arm good-frame and PHY wake
 * sources, enter suspend mode 3 and restart Rx so wake frames can be
 * received. Returns 0 or a negative errno from the first failing
 * register access.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	/* clear all wake-up control and source state */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* enable PHY and WoL wake sources, select suspend mode 3 */
	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* clear any latched wake-up status */
	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Rx must run so wake frames can be detected */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4884 
/* Program the device for system suspend according to the Wake-on-LAN
 * mask @wol (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP). Multicast and ARP
 * wake use CRC16-matched wake-up frame filters. Returns 0 or a
 * negative errno from the first failing register access.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* prefixes matched by the wake-frame filters */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* clear all wake-up control and source state */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wake-up frame filters before configuring new ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* byte mask 0x7: first 3 destination-address bytes */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* byte mask 0x3: first 2 destination-address bytes */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* byte mask 0x3000: EtherType bytes at offsets 12 and 13 */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Rx must run so wake events can be detected */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
5081 
/* USB suspend callback, covering both runtime autosuspend and system
 * sleep. With the interface open: refuses autosuspend while Tx is
 * busy, quiesces both data paths and arms either autosuspend wake
 * (good-frame) or the configured WoL sources. With the interface
 * closed: disables all wake sources and enters suspend mode 3.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* blocks new URB submissions in the Tx path */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		rtnl_lock();
		phylink_suspend(dev->phylink, false);
		rtnl_unlock();

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		timer_delete(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			/* system sleep: arm the user-configured WoL mask */
			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* clear any latched wake-up status */
		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5195 
lan78xx_submit_deferred_urbs(struct lan78xx_net * dev)5196 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
5197 {
5198 	bool pipe_halted = false;
5199 	struct urb *urb;
5200 
5201 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5202 		struct sk_buff *skb = urb->context;
5203 		int ret;
5204 
5205 		if (!netif_device_present(dev->net) ||
5206 		    !netif_carrier_ok(dev->net) ||
5207 		    pipe_halted) {
5208 			lan78xx_release_tx_buf(dev, skb);
5209 			continue;
5210 		}
5211 
5212 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5213 
5214 		if (ret == 0) {
5215 			netif_trans_update(dev->net);
5216 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5217 		} else {
5218 			if (ret == -EPIPE) {
5219 				netif_stop_queue(dev->net);
5220 				pipe_halted = true;
5221 			} else if (ret == -ENODEV) {
5222 				netif_device_detach(dev->net);
5223 			}
5224 
5225 			lan78xx_release_tx_buf(dev, skb);
5226 		}
5227 	}
5228 
5229 	return pipe_halted;
5230 }
5231 
lan78xx_resume(struct usb_interface * intf)5232 static int lan78xx_resume(struct usb_interface *intf)
5233 {
5234 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5235 	bool dev_open;
5236 	int ret;
5237 
5238 	mutex_lock(&dev->dev_mutex);
5239 
5240 	netif_dbg(dev, ifup, dev->net, "resuming device");
5241 
5242 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5243 
5244 	if (dev_open) {
5245 		bool pipe_halted = false;
5246 
5247 		ret = lan78xx_flush_tx_fifo(dev);
5248 		if (ret < 0)
5249 			goto out;
5250 
5251 		if (dev->urb_intr) {
5252 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
5253 
5254 			if (ret < 0) {
5255 				if (ret == -ENODEV)
5256 					netif_device_detach(dev->net);
5257 				netdev_warn(dev->net, "Failed to submit intr URB");
5258 			}
5259 		}
5260 
5261 		spin_lock_irq(&dev->txq.lock);
5262 
5263 		if (netif_device_present(dev->net)) {
5264 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
5265 
5266 			if (pipe_halted)
5267 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5268 		}
5269 
5270 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5271 
5272 		spin_unlock_irq(&dev->txq.lock);
5273 
5274 		if (!pipe_halted &&
5275 		    netif_device_present(dev->net) &&
5276 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5277 			netif_start_queue(dev->net);
5278 
5279 		ret = lan78xx_start_tx_path(dev);
5280 		if (ret < 0)
5281 			goto out;
5282 
5283 		napi_schedule(&dev->napi);
5284 
5285 		if (!timer_pending(&dev->stat_monitor)) {
5286 			dev->delta = 1;
5287 			mod_timer(&dev->stat_monitor,
5288 				  jiffies + STAT_UPDATE_TIMER);
5289 		}
5290 
5291 	} else {
5292 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5293 	}
5294 
5295 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
5296 	if (ret < 0)
5297 		goto out;
5298 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5299 	if (ret < 0)
5300 		goto out;
5301 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5302 	if (ret < 0)
5303 		goto out;
5304 
5305 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5306 					     WUCSR2_ARP_RCD_ |
5307 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5308 					     WUCSR2_IPV4_TCPSYN_RCD_);
5309 	if (ret < 0)
5310 		goto out;
5311 
5312 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5313 					    WUCSR_EEE_RX_WAKE_ |
5314 					    WUCSR_PFDA_FR_ |
5315 					    WUCSR_RFE_WAKE_FR_ |
5316 					    WUCSR_WUFR_ |
5317 					    WUCSR_MPR_ |
5318 					    WUCSR_BCST_FR_);
5319 	if (ret < 0)
5320 		goto out;
5321 
5322 	ret = 0;
5323 out:
5324 	mutex_unlock(&dev->dev_mutex);
5325 
5326 	return ret;
5327 }
5328 
lan78xx_reset_resume(struct usb_interface * intf)5329 static int lan78xx_reset_resume(struct usb_interface *intf)
5330 {
5331 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5332 	int ret;
5333 
5334 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5335 
5336 	ret = lan78xx_reset(dev);
5337 	if (ret < 0)
5338 		return ret;
5339 
5340 	ret = lan78xx_resume(intf);
5341 	if (ret < 0)
5342 		return ret;
5343 
5344 	rtnl_lock();
5345 	phylink_resume(dev->phylink);
5346 	rtnl_unlock();
5347 
5348 	return 0;
5349 }
5350 
/* USB vendor/product ID pairs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
5371 
/* USB driver registration: wires the probe/disconnect and power
 * management callbacks into the USB core.  Autosuspend is supported,
 * and hub-initiated LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
5389