xref: /linux/drivers/net/usb/lan78xx.c (revision 2c7e63d702f6c4209c5af833308e7fcbc7d4ab17)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/phylink.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <net/selftests.h>
24 #include <net/vxlan.h>
25 #include <linux/interrupt.h>
26 #include <linux/irqdomain.h>
27 #include <linux/irq.h>
28 #include <linux/irqchip/chained_irq.h>
29 #include <linux/microchipphy.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include "lan78xx.h"
33 
/* Module identification strings */
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

/* Timeouts and queue sizing */
#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

/* USB bulk endpoint packet sizes per bus speed (SuperSpeed/High/Full) */
#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

/* Encode a byte level into the FLOW register's 512-byte units and pack
 * the "flow on" / "flow off" thresholds into one register value.
 */
#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
/* Default hardware offload feature enables */
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_ALIGNMENT			(4)
#define RXW_PADDING			2

/* USB vendor/product IDs matched by this driver */
#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID	(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

/* EEPROM/OTP content markers and limits */
#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

/* All Wake-on-LAN trigger types supported by the device */
#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* Number of preallocated Tx URBs (same count for every bus speed) */
#define TX_URB_NUM			10
#define TX_SS_URB_NUM			TX_URB_NUM
#define TX_HS_URB_NUM			TX_URB_NUM
#define TX_FS_URB_NUM			TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet
 */
#define TX_SS_URB_SIZE			(32 * 1024)
#define TX_HS_URB_SIZE			(16 * 1024)
#define TX_FS_URB_SIZE			(10 * 1024)

/* Rx URB pool sizing; Rx buffers reuse the Tx buffer sizes */
#define RX_SS_URB_NUM			30
#define RX_HS_URB_NUM			10
#define RX_FS_URB_NUM			10
#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
#define RX_FS_URB_SIZE			TX_FS_URB_SIZE

/* Per-speed burst cap and bulk-in delay register defaults */
#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY		0x2000
#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY		0x2000
#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY		0x2000

/* Tx command header size and the largest TSO payload per URB */
#define TX_CMD_LEN			8
#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)

/* Rx command header size and maximum frame length for a given MTU */
#define RX_CMD_LEN			10
#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)
176 
/* ethtool statistics strings; entry order must match the field order of
 * struct lan78xx_statstage / lan78xx_statstage64 below, since stats are
 * exported by copying those structs as flat arrays.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
226 
/* Raw 32-bit hardware statistics counters, in the exact layout returned
 * by the USB_VENDOR_REQUEST_GET_STATS control transfer (see
 * lan78xx_read_stats()).  Counters are read as little-endian words and
 * may roll over; lan78xx_statstage64 holds the accumulated totals.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
276 
/* 64-bit accumulated statistics.  Field order mirrors
 * struct lan78xx_statstage exactly — lan78xx_update_stats() walks both
 * structs as parallel flat arrays, so the layouts must stay in sync.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
326 
/* Device registers read as a group — presumably for the ethtool register
 * dump; verify against the lan78xx_get_regs() implementation.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
348 
#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

/* Driver-private receive-filter and VLAN state, plus the deferred work
 * items that program the hardware filter tables.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;			/* configured Wake-on-LAN flags */
};
365 
/* Lifecycle states of an URB-backed skb as it moves through the Tx/Rx
 * queues (stored in struct skb_data::state).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
375 
/* Per-buffer bookkeeping stored in skb->cb; must fit in sizeof(skb->cb). */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB permanently paired with this skb */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;	/* packets aggregated into this buffer */
};
383 
/* Bit numbers for the dev->flags event bitmap, tested with test_bit()
 * and serviced by the driver's deferred work.
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_PHY_INT_ACK		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10
395 
/* Statistics accumulator: the last raw snapshot, per-counter rollover
 * counts/maxima, and the derived 64-bit totals.  All fields are guarded
 * by access_lock (see lan78xx_update_stats()).
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
403 
/* State for the driver's software IRQ domain that demultiplexes the
 * device's interrupt-endpoint status bits (e.g. the PHY interrupt).
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* currently enabled irq bits */
	struct mutex		irq_lock;		/* for irq bus access */
};
412 
/* Main per-device driver state. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;

	/* URB pool configuration (sized per USB bus speed) */
	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	/* Buffer queues: *_free hold idle preallocated buffers; the rest
	 * track buffers in flight or awaiting processing.
	 */
	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		mdiobus_mutex; /* for MDIO bus access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;		/* EVENT_* bit numbers */

	wait_queue_head_t	*wait;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;

	struct phylink		*phylink;
	struct phylink_config	phylink_config;
};
470 
/* use ethtool to change the level for any given device */
/* -1 appears to mean "no override"; confirm against where msg_enable is set */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
475 
lan78xx_get_buf(struct sk_buff_head * buf_pool)476 static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
477 {
478 	if (skb_queue_empty(buf_pool))
479 		return NULL;
480 
481 	return skb_dequeue(buf_pool);
482 }
483 
lan78xx_release_buf(struct sk_buff_head * buf_pool,struct sk_buff * buf)484 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
485 				struct sk_buff *buf)
486 {
487 	buf->data = buf->head;
488 	skb_reset_tail_pointer(buf);
489 
490 	buf->len = 0;
491 	buf->data_len = 0;
492 
493 	skb_queue_tail(buf_pool, buf);
494 }
495 
lan78xx_free_buf_pool(struct sk_buff_head * buf_pool)496 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
497 {
498 	struct skb_data *entry;
499 	struct sk_buff *buf;
500 
501 	while (!skb_queue_empty(buf_pool)) {
502 		buf = skb_dequeue(buf_pool);
503 		if (buf) {
504 			entry = (struct skb_data *)buf->cb;
505 			usb_free_urb(entry->urb);
506 			dev_kfree_skb_any(buf);
507 		}
508 	}
509 }
510 
/* Preallocate @n_urbs buffers of @urb_size bytes into @buf_pool, each
 * paired with an URB via the skb's control block.
 *
 * Returns 0 on success or -ENOMEM, in which case any partially built
 * pool is torn down again.
 */
static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		struct skb_data *entry;
		struct sk_buff *buf;
		struct urb *urb;

		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		/* Pair the URB with the buffer and reset the bookkeeping. */
		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}
554 
lan78xx_get_rx_buf(struct lan78xx_net * dev)555 static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
556 {
557 	return lan78xx_get_buf(&dev->rxq_free);
558 }
559 
lan78xx_release_rx_buf(struct lan78xx_net * dev,struct sk_buff * rx_buf)560 static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
561 				   struct sk_buff *rx_buf)
562 {
563 	lan78xx_release_buf(&dev->rxq_free, rx_buf);
564 }
565 
lan78xx_free_rx_resources(struct lan78xx_net * dev)566 static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
567 {
568 	lan78xx_free_buf_pool(&dev->rxq_free);
569 }
570 
lan78xx_alloc_rx_resources(struct lan78xx_net * dev)571 static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
572 {
573 	return lan78xx_alloc_buf_pool(&dev->rxq_free,
574 				      dev->n_rx_urbs, dev->rx_urb_size, dev);
575 }
576 
lan78xx_get_tx_buf(struct lan78xx_net * dev)577 static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
578 {
579 	return lan78xx_get_buf(&dev->txq_free);
580 }
581 
lan78xx_release_tx_buf(struct lan78xx_net * dev,struct sk_buff * tx_buf)582 static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
583 				   struct sk_buff *tx_buf)
584 {
585 	lan78xx_release_buf(&dev->txq_free, tx_buf);
586 }
587 
lan78xx_free_tx_resources(struct lan78xx_net * dev)588 static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
589 {
590 	lan78xx_free_buf_pool(&dev->txq_free);
591 }
592 
lan78xx_alloc_tx_resources(struct lan78xx_net * dev)593 static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
594 {
595 	return lan78xx_alloc_buf_pool(&dev->txq_free,
596 				      dev->n_tx_urbs, dev->tx_urb_size, dev);
597 }
598 
/* Read the 32-bit device register @index into *@data over a vendor
 * control transfer.  Returns 0 on success or a negative error code;
 * *@data is only written on success.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *tmp;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	/* Heap bounce buffer — control transfers need DMA-able memory. */
	tmp = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, tmp, 4, USB_CTRL_GET_TIMEOUT);
	if (ret < 0) {
		if (net_ratelimit())
			netdev_warn(dev->net,
				    "Failed to read register index 0x%08x. ret = %pe",
				    index, ERR_PTR(ret));
	} else {
		/* Registers are little-endian on the wire. */
		le32_to_cpus(tmp);
		*data = *tmp;
	}

	kfree(tmp);

	return ret < 0 ? ret : 0;
}
628 
/* Write @data to the 32-bit device register @index over a vendor
 * control transfer.  Returns 0 on success or a negative error code.
 */
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *tmp;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	/* Heap bounce buffer — control transfers need DMA-able memory. */
	tmp = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* Registers are little-endian on the wire. */
	*tmp = data;
	cpu_to_le32s(tmp);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, tmp, 4, USB_CTRL_SET_TIMEOUT);
	if (ret < 0 && net_ratelimit())
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));

	kfree(tmp);

	return ret < 0 ? ret : 0;
}
659 
/* Read-modify-write register @reg: replace the bits selected by @mask
 * with the corresponding bits of @data.  Returns 0 or a negative error.
 */
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	u32 val;
	int ret;

	ret = lan78xx_read_reg(dev, reg, &val);
	if (ret < 0)
		return ret;

	val = (val & ~mask) | (data & mask);

	return lan78xx_write_reg(dev, reg, val);
}
675 
lan78xx_read_stats(struct lan78xx_net * dev,struct lan78xx_statstage * data)676 static int lan78xx_read_stats(struct lan78xx_net *dev,
677 			      struct lan78xx_statstage *data)
678 {
679 	int ret = 0;
680 	int i;
681 	struct lan78xx_statstage *stats;
682 	u32 *src;
683 	u32 *dst;
684 
685 	stats = kmalloc_obj(*stats);
686 	if (!stats)
687 		return -ENOMEM;
688 
689 	ret = usb_control_msg(dev->udev,
690 			      usb_rcvctrlpipe(dev->udev, 0),
691 			      USB_VENDOR_REQUEST_GET_STATS,
692 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
693 			      0,
694 			      0,
695 			      (void *)stats,
696 			      sizeof(*stats),
697 			      USB_CTRL_SET_TIMEOUT);
698 	if (likely(ret >= 0)) {
699 		src = (u32 *)stats;
700 		dst = (u32 *)data;
701 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
702 			le32_to_cpus(&src[i]);
703 			dst[i] = src[i];
704 		}
705 	} else {
706 		netdev_warn(dev->net,
707 			    "Failed to read stat ret = %d", ret);
708 	}
709 
710 	kfree(stats);
711 
712 	return ret;
713 }
714 
/* If the freshly read 32-bit counter is smaller than the previously
 * saved value, the hardware counter wrapped — bump the per-counter
 * rollover count used by lan78xx_update_stats().
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
720 
/* Detect 32-bit wraparound on every hardware counter by comparing the
 * new snapshot @stats against the previously saved one, then remember
 * @stats as the new baseline.  Caller holds dev->stats.access_lock.
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* New baseline for the next rollover check. */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
774 
/* Refresh the accumulated 64-bit statistics: read a new hardware
 * snapshot, fold in any counter rollovers, and recompute curr_stat.
 * Walks the u32 snapshot/rollover structs and the u64 totals struct as
 * parallel flat arrays — relies on lan78xx_statstage and
 * lan78xx_statstage64 having identical field order.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	/* Device must be resumed for the USB stats transfer; bail
	 * quietly if that fails.
	 */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* total = snapshot + rollovers * (counter range) */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
802 
/* Enable a hardware block by setting @hw_enable in register @reg. */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
807 
/* Disable a hardware block and wait until it reports that it has
 * stopped.  @hw_enabled is the enable bit to clear in @reg;
 * @hw_disabled is the status bit signalling the block is down.
 *
 * Returns 0 on success, a negative USB error, or -ETIMEDOUT if the
 * block does not stop within HW_DISABLE_TIMEOUT.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long deadline;
	bool stopped;
	u32 val;
	int ret;

	ret = lan78xx_read_reg(dev, reg, &val);
	if (ret < 0)
		return ret;

	/* Nothing to do if the block is not currently enabled. */
	if (!(val & hw_enabled))
		return 0;

	ret = lan78xx_write_reg(dev, reg, val & ~hw_enabled);
	if (ret < 0)
		return ret;

	/* Poll for the disabled status until it appears or we time out. */
	stopped = false;
	deadline = jiffies + HW_DISABLE_TIMEOUT;
	do {
		ret = lan78xx_read_reg(dev, reg, &val);
		if (ret < 0)
			return ret;

		if (val & hw_disabled)
			stopped = true;
		else
			msleep(HW_DISABLE_DELAY_MS);
	} while (!stopped && !time_after(jiffies, deadline));

	return stopped ? 0 : -ETIMEDOUT;
}
845 
/* Flush a FIFO by setting its @fifo_flush bit in register @reg. */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
850 
lan78xx_start_tx_path(struct lan78xx_net * dev)851 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
852 {
853 	int ret;
854 
855 	netif_dbg(dev, drv, dev->net, "start tx path");
856 
857 	/* Start the MAC transmitter */
858 
859 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
860 	if (ret < 0)
861 		return ret;
862 
863 	/* Start the Tx FIFO */
864 
865 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
866 	if (ret < 0)
867 		return ret;
868 
869 	return 0;
870 }
871 
lan78xx_stop_tx_path(struct lan78xx_net * dev)872 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
873 {
874 	int ret;
875 
876 	netif_dbg(dev, drv, dev->net, "stop tx path");
877 
878 	/* Stop the Tx FIFO */
879 
880 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
881 	if (ret < 0)
882 		return ret;
883 
884 	/* Stop the MAC transmitter */
885 
886 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
887 	if (ret < 0)
888 		return ret;
889 
890 	return 0;
891 }
892 
893 /* The caller must ensure the Tx path is stopped before calling
894  * lan78xx_flush_tx_fifo().
895  */
lan78xx_flush_tx_fifo(struct lan78xx_net * dev)896 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
897 {
898 	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
899 }
900 
lan78xx_start_rx_path(struct lan78xx_net * dev)901 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
902 {
903 	int ret;
904 
905 	netif_dbg(dev, drv, dev->net, "start rx path");
906 
907 	/* Start the Rx FIFO */
908 
909 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
910 	if (ret < 0)
911 		return ret;
912 
913 	/* Start the MAC receiver*/
914 
915 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
916 	if (ret < 0)
917 		return ret;
918 
919 	return 0;
920 }
921 
lan78xx_stop_rx_path(struct lan78xx_net * dev)922 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
923 {
924 	int ret;
925 
926 	netif_dbg(dev, drv, dev->net, "stop rx path");
927 
928 	/* Stop the MAC receiver */
929 
930 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
931 	if (ret < 0)
932 		return ret;
933 
934 	/* Stop the Rx FIFO */
935 
936 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
937 	if (ret < 0)
938 		return ret;
939 
940 	return 0;
941 }
942 
943 /* The caller must ensure the Rx path is stopped before calling
944  * lan78xx_flush_rx_fifo().
945  */
lan78xx_flush_rx_fifo(struct lan78xx_net * dev)946 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
947 {
948 	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
949 }
950 
951 /* Loop until the read is completed with timeout called with mdiobus_mutex held */
lan78xx_mdiobus_wait_not_busy(struct lan78xx_net * dev)952 static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
953 {
954 	unsigned long start_time = jiffies;
955 	u32 val;
956 	int ret;
957 
958 	do {
959 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
960 		if (ret < 0)
961 			return ret;
962 
963 		if (!(val & MII_ACC_MII_BUSY_))
964 			return 0;
965 	} while (!time_after(jiffies, start_time + HZ));
966 
967 	return -ETIMEDOUT;
968 }
969 
mii_access(int id,int index,int read)970 static inline u32 mii_access(int id, int index, int read)
971 {
972 	u32 ret;
973 
974 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
975 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
976 	if (read)
977 		ret |= MII_ACC_MII_READ_;
978 	else
979 		ret |= MII_ACC_MII_WRITE_;
980 	ret |= MII_ACC_MII_BUSY_;
981 
982 	return ret;
983 }
984 
lan78xx_wait_eeprom(struct lan78xx_net * dev)985 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
986 {
987 	unsigned long start_time = jiffies;
988 	u32 val;
989 	int ret;
990 
991 	do {
992 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
993 		if (ret < 0)
994 			return ret;
995 
996 		if (!(val & E2P_CMD_EPC_BUSY_) ||
997 		    (val & E2P_CMD_EPC_TIMEOUT_))
998 			break;
999 		usleep_range(40, 100);
1000 	} while (!time_after(jiffies, start_time + HZ));
1001 
1002 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
1003 		netdev_warn(dev->net, "EEPROM read operation timeout");
1004 		return -ETIMEDOUT;
1005 	}
1006 
1007 	return 0;
1008 }
1009 
lan78xx_eeprom_confirm_not_busy(struct lan78xx_net * dev)1010 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
1011 {
1012 	unsigned long start_time = jiffies;
1013 	u32 val;
1014 	int ret;
1015 
1016 	do {
1017 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
1018 		if (ret < 0)
1019 			return ret;
1020 
1021 		if (!(val & E2P_CMD_EPC_BUSY_))
1022 			return 0;
1023 
1024 		usleep_range(40, 100);
1025 	} while (!time_after(jiffies, start_time + HZ));
1026 
1027 	netdev_warn(dev->net, "EEPROM is busy");
1028 	return -ETIMEDOUT;
1029 }
1030 
/* Read @length bytes starting at @offset from the external EEPROM into
 * @data, one E2P_CMD transaction per byte.  On LAN7800 the LED pins are
 * temporarily disabled because they are muxed with the EEPROM pins, and
 * the original HW_CFG value is restored on every exit path except hard
 * USB failures.  Returns 0 on success or a negative error code.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	/* -ETIMEDOUT is device-side, so still restore HW_CFG below. */
	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Issue a one-byte EEPROM read at the current offset. */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	/* Restore the saved HW_CFG (re-enables LEDs on LAN7800). */
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		int rc = lan78xx_write_reg(dev, HW_CFG, saved);
		/* If USB fails, there is nothing to do */
		if (rc < 0)
			return rc;
	}
	return ret;
}
1091 
/* Read from the EEPROM after verifying it contains a valid image.
 *
 * The indicator byte at offset 0 must match EEPROM_INDICATOR;
 * otherwise -ENODATA is returned without touching @data.
 */
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 indicator;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &indicator);
	if (ret < 0)
		return ret;

	if (indicator != EEPROM_INDICATOR)
		return -ENODATA;

	return lan78xx_read_raw_eeprom(dev, offset, length, data);
}
1107 
/* Write @length bytes starting at @offset to the external EEPROM.
 *
 * Issues a write-enable (EWEN) command first, then one WRITE command per
 * byte, waiting for each to complete.  On LAN7800 the LED function is
 * disabled during the access and restored afterwards.  Returns 0 on
 * success or a negative error code.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Looks like not USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	/* restore the LED configuration saved on entry (LAN7800 only) */
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		int rc = lan78xx_write_reg(dev, HW_CFG, saved);
		/* If USB fails, there is nothing to do */
		if (rc < 0)
			return rc;
	}
	return ret;
}
1186 
/* Read @length bytes from raw OTP memory starting at @offset.
 *
 * Powers the OTP block up if it is powered down, then issues one READ
 * command per byte and polls OTP_STATUS for completion.  Each wait is
 * capped at ~1s.  Returns 0 on success or a negative error code.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* split the byte address across the two address registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		/* trigger the command and poll for completion */
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1261 
/* Program @length bytes into raw OTP memory starting at @offset.
 *
 * Powers the OTP block up if needed, selects BYTE program mode, then
 * issues a program/verify command per byte, polling OTP_STATUS for
 * completion (each wait capped at ~1s).  OTP programming is one-way;
 * callers are expected to have validated the data beforehand.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* split the byte address across the two address registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		/* program-and-verify, then trigger and wait for completion */
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1340 
/* Read from OTP after validating the indicator byte at offset 0.
 *
 * OTP_INDICATOR_2 means the second image is active, so reads are
 * redirected 0x100 bytes further in; any other value than
 * OTP_INDICATOR_1 is rejected with -EINVAL.
 */
static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
	if (ret < 0)
		return ret;

	if (sig == OTP_INDICATOR_2)
		offset += 0x100;
	else if (sig != OTP_INDICATOR_1)
		return -EINVAL;

	return lan78xx_read_raw_otp(dev, offset, length, data);
}
1360 
lan78xx_dataport_wait_not_busy(struct lan78xx_net * dev)1361 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1362 {
1363 	int i, ret;
1364 
1365 	for (i = 0; i < 100; i++) {
1366 		u32 dp_sel;
1367 
1368 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1369 		if (unlikely(ret < 0))
1370 			return ret;
1371 
1372 		if (dp_sel & DP_SEL_DPRDY_)
1373 			return 0;
1374 
1375 		usleep_range(40, 100);
1376 	}
1377 
1378 	netdev_warn(dev->net, "%s timed out", __func__);
1379 
1380 	return -ETIMEDOUT;
1381 }
1382 
/* Write @length 32-bit words from @buf into the internal RAM selected
 * by @ram_select, starting at word address @addr, via the DP_* data
 * port registers.  Serialized with pdata->dataport_mutex and holds a
 * USB autopm reference for the duration of the transfer.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	/* select which internal RAM the port addresses */
	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	/* address/data/command sequence per word, waiting after each */
	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1430 
/* Stage a MAC address into perfect-filter slot @index of the shadow
 * table.  Slot 0 is reserved for the device's own address, so indices
 * outside 1..NUM_OF_MAF-1 (or a NULL pdata) are silently ignored.  The
 * table is flushed to hardware later by the deferred multicast work.
 */
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 lo, hi;

	if (!pdata || index <= 0 || index >= NUM_OF_MAF)
		return;

	/* MAF_LO holds the first four octets, little-end first */
	lo = ((u32)addr[3] << 24) | ((u32)addr[2] << 16) |
	     ((u32)addr[1] << 8) | addr[0];
	/* MAF_HI holds the last two octets plus valid/type flags */
	hi = ((u32)addr[5] << 8) | addr[4];
	hi |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;

	pdata->pfilter_table[index][1] = lo;
	pdata->pfilter_table[index][0] = hi;
}
1448 
/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	/* top 9 bits of the CRC select one of 512 hash-table bits */
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
1454 
lan78xx_deferred_multicast_write(struct work_struct * param)1455 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1456 {
1457 	struct lan78xx_priv *pdata =
1458 			container_of(param, struct lan78xx_priv, set_multicast);
1459 	struct lan78xx_net *dev = pdata->dev;
1460 	int i, ret;
1461 
1462 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1463 		  pdata->rfe_ctl);
1464 
1465 	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
1466 				     DP_SEL_VHF_VLAN_LEN,
1467 				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1468 	if (ret < 0)
1469 		goto multicast_write_done;
1470 
1471 	for (i = 1; i < NUM_OF_MAF; i++) {
1472 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1473 		if (ret < 0)
1474 			goto multicast_write_done;
1475 
1476 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1477 					pdata->pfilter_table[i][1]);
1478 		if (ret < 0)
1479 			goto multicast_write_done;
1480 
1481 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1482 					pdata->pfilter_table[i][0]);
1483 		if (ret < 0)
1484 			goto multicast_write_done;
1485 	}
1486 
1487 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1488 
1489 multicast_write_done:
1490 	if (ret < 0)
1491 		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
1492 	return;
1493 }
1494 
lan78xx_set_multicast(struct net_device * netdev)1495 static void lan78xx_set_multicast(struct net_device *netdev)
1496 {
1497 	struct lan78xx_net *dev = netdev_priv(netdev);
1498 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1499 	unsigned long flags;
1500 	int i;
1501 
1502 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1503 
1504 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1505 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1506 
1507 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1508 		pdata->mchash_table[i] = 0;
1509 
1510 	/* pfilter_table[0] has own HW address */
1511 	for (i = 1; i < NUM_OF_MAF; i++) {
1512 		pdata->pfilter_table[i][0] = 0;
1513 		pdata->pfilter_table[i][1] = 0;
1514 	}
1515 
1516 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1517 
1518 	if (dev->net->flags & IFF_PROMISC) {
1519 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1520 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1521 	} else {
1522 		if (dev->net->flags & IFF_ALLMULTI) {
1523 			netif_dbg(dev, drv, dev->net,
1524 				  "receive all multicast enabled");
1525 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1526 		}
1527 	}
1528 
1529 	if (netdev_mc_count(dev->net)) {
1530 		struct netdev_hw_addr *ha;
1531 		int i;
1532 
1533 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1534 
1535 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1536 
1537 		i = 1;
1538 		netdev_for_each_mc_addr(ha, netdev) {
1539 			/* set first 32 into Perfect Filter */
1540 			if (i < 33) {
1541 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1542 			} else {
1543 				u32 bitnum = lan78xx_hash(ha->addr);
1544 
1545 				pdata->mchash_table[bitnum / 32] |=
1546 							(1 << (bitnum % 32));
1547 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1548 			}
1549 			i++;
1550 		}
1551 	}
1552 
1553 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1554 
1555 	/* defer register writes to a sleepable context */
1556 	schedule_work(&pdata->set_multicast);
1557 }
1558 
1559 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1560 
/* Reset the MAC and wait (up to ~1s) for the self-clearing reset bit.
 *
 * The whole sequence runs under mdiobus_mutex: resetting while an MDIO
 * transaction is in flight can lock up the MAC interface (see below).
 * Returns 0 on success, a negative USB error, or -ETIMEDOUT.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}
1606 
1607 /**
1608  * lan78xx_phy_int_ack - Acknowledge PHY interrupt
1609  * @dev: pointer to the LAN78xx device structure
1610  *
1611  * This function acknowledges the PHY interrupt by setting the
1612  * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
1613  *
1614  * Return: 0 on success or a negative error code on failure.
1615  */
lan78xx_phy_int_ack(struct lan78xx_net * dev)1616 static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
1617 {
1618 	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1619 }
1620 
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.	hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	/* record which event is pending in dev->flags, then kick the
	 * delayed-work handler that services those bits
	 */
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1632 
lan78xx_status(struct lan78xx_net * dev,struct urb * urb)1633 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1634 {
1635 	u32 intdata;
1636 
1637 	if (urb->actual_length != 4) {
1638 		netdev_warn(dev->net,
1639 			    "unexpected urb length %d", urb->actual_length);
1640 		return;
1641 	}
1642 
1643 	intdata = get_unaligned_le32(urb->transfer_buffer);
1644 
1645 	if (intdata & INT_ENP_PHY_INT) {
1646 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1647 		lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK);
1648 
1649 		if (dev->domain_data.phyirq > 0)
1650 			generic_handle_irq_safe(dev->domain_data.phyirq);
1651 	} else {
1652 		netdev_warn(dev->net,
1653 			    "unexpected interrupt: 0x%08x\n", intdata);
1654 	}
1655 }
1656 
/* ethtool get_eeprom_len: report the maximum supported EEPROM size */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1661 
/* ethtool get_eeprom: raw EEPROM dump with the device resumed for the
 * duration of the access.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret = usb_autopm_get_interface(dev->intf);

	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;
	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1680 
/* ethtool set_eeprom: write to EEPROM (LAN78XX_EEPROM_MAGIC) or to OTP
 * (LAN78XX_OTP_MAGIC, full 512-byte image with a valid indicator).
 *
 * Fix: previously a request with an unrecognized magic (or an OTP
 * request failing the offset/len/indicator checks) fell through and
 * returned 0 — success — without writing anything.  Reject it with
 * -EINVAL instead.
 */
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
	else
		ret = -EINVAL;

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1706 
/* ethtool get_strings: statistics names or selftest names */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
		break;
	case ETH_SS_TEST:
		net_selftest_get_strings(data);
		break;
	}
}
1715 
lan78xx_get_sset_count(struct net_device * netdev,int sset)1716 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1717 {
1718 	if (sset == ETH_SS_STATS)
1719 		return ARRAY_SIZE(lan78xx_gstrings);
1720 	else if (sset == ETH_SS_TEST)
1721 		return net_selftest_get_count();
1722 	else
1723 		return -EOPNOTSUPP;
1724 }
1725 
/* ethtool get_ethtool_stats: refresh counters from hardware, then copy
 * a consistent snapshot under the stats access lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1737 
lan78xx_get_wol(struct net_device * netdev,struct ethtool_wolinfo * wol)1738 static void lan78xx_get_wol(struct net_device *netdev,
1739 			    struct ethtool_wolinfo *wol)
1740 {
1741 	struct lan78xx_net *dev = netdev_priv(netdev);
1742 	int ret;
1743 	u32 buf;
1744 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1745 
1746 	if (usb_autopm_get_interface(dev->intf) < 0)
1747 		return;
1748 
1749 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1750 	if (unlikely(ret < 0)) {
1751 		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
1752 		wol->supported = 0;
1753 		wol->wolopts = 0;
1754 	} else {
1755 		if (buf & USB_CFG_RMT_WKP_) {
1756 			wol->supported = WAKE_ALL;
1757 			wol->wolopts = pdata->wol;
1758 		} else {
1759 			wol->supported = 0;
1760 			wol->wolopts = 0;
1761 		}
1762 	}
1763 
1764 	usb_autopm_put_interface(dev->intf);
1765 }
1766 
/* ethtool set_wol: validate the requested wake options, persist them in
 * driver state, arm/disarm USB remote wakeup, and forward to the PHY.
 */
static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	/* reject any wake option outside the supported WAKE_ALL set */
	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	/* enable USB remote wakeup iff any wake option is requested */
	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1794 
/* ethtool get_eee: delegate to phylink */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_get_eee(dev->phylink, edata);
}
1801 
/* ethtool set_eee: delegate to phylink */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_eee(dev->phylink, edata);
}
1808 
/* ethtool get_drvinfo: report driver name and USB bus path */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1817 
/* ethtool get_msglevel: current netif message-enable bitmap */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1824 
/* ethtool set_msglevel: update the netif message-enable bitmap */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1831 
/* ethtool get_link_ksettings: delegate to phylink */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_get(dev->phylink, cmd);
}
1839 
/* ethtool set_link_ksettings: delegate to phylink */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_set(dev->phylink, cmd);
}
1847 
/* ethtool get_pauseparam: delegate to phylink */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	phylink_ethtool_get_pauseparam(dev->phylink, pause);
}
1855 
/* ethtool set_pauseparam: delegate to phylink */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_pauseparam(dev->phylink, pause);
}
1863 
/* ethtool get_regs_len: dump is one u32 per entry of lan78xx_regs */
static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}
1868 
1869 static void
lan78xx_get_regs(struct net_device * netdev,struct ethtool_regs * regs,void * buf)1870 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1871 		 void *buf)
1872 {
1873 	struct lan78xx_net *dev = netdev_priv(netdev);
1874 	unsigned int data_count = 0;
1875 	u32 *data = buf;
1876 	int i, ret;
1877 
1878 	/* Read Device/MAC registers */
1879 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
1880 		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1881 		if (ret < 0) {
1882 			netdev_warn(dev->net,
1883 				    "failed to read register 0x%08x\n",
1884 				    lan78xx_regs[i]);
1885 			goto clean_data;
1886 		}
1887 
1888 		data_count++;
1889 	}
1890 
1891 	return;
1892 
1893 clean_data:
1894 	memset(data, 0, data_count * sizeof(u32));
1895 }
1896 
/* ethtool entry points; link settings, pause and EEE go through phylink */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.self_test	= net_selftest,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1922 
/* Determine and program the device MAC address.
 *
 * Priority: an address already present in RX_ADDRL/RX_ADDRH, then the
 * platform / device-tree address, then EEPROM or OTP, and finally a
 * random locally-administered address.  The chosen address is also
 * written to perfect-filter slot 0 and set on the net_device.
 */
static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];
	int ret;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
	if (ret < 0)
		return ret;

	/* unpack the two registers into byte order */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* write the fallback address back to the RX address regs */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		if (ret < 0)
			return ret;
	}

	/* mirror the address into perfect-filter slot 0 */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(dev->net, addr);

	return 0;
}
1989 
1990 /* MDIO read and write wrappers for phylib */
lan78xx_mdiobus_read(struct mii_bus * bus,int phy_id,int idx)1991 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1992 {
1993 	struct lan78xx_net *dev = bus->priv;
1994 	u32 val, addr;
1995 	int ret;
1996 
1997 	ret = usb_autopm_get_interface(dev->intf);
1998 	if (ret < 0)
1999 		return ret;
2000 
2001 	mutex_lock(&dev->mdiobus_mutex);
2002 
2003 	/* confirm MII not busy */
2004 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2005 	if (ret < 0)
2006 		goto done;
2007 
2008 	/* set the address, index & direction (read from PHY) */
2009 	addr = mii_access(phy_id, idx, MII_READ);
2010 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2011 	if (ret < 0)
2012 		goto done;
2013 
2014 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2015 	if (ret < 0)
2016 		goto done;
2017 
2018 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2019 	if (ret < 0)
2020 		goto done;
2021 
2022 	ret = (int)(val & 0xFFFF);
2023 
2024 done:
2025 	mutex_unlock(&dev->mdiobus_mutex);
2026 	usb_autopm_put_interface(dev->intf);
2027 
2028 	return ret;
2029 }
2030 
/* Write one PHY register.  Returns 0 on success or a negative error
 * code.  Serialized with mdiobus_mutex and holds a USB autopm reference
 * for the duration of the transaction.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* stage the data word before issuing the write command */
	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	/* wait for the transaction to complete */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
2069 
lan78xx_mdio_init(struct lan78xx_net * dev)2070 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2071 {
2072 	struct device_node *node;
2073 	int ret;
2074 
2075 	dev->mdiobus = mdiobus_alloc();
2076 	if (!dev->mdiobus) {
2077 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2078 		return -ENOMEM;
2079 	}
2080 
2081 	dev->mdiobus->priv = (void *)dev;
2082 	dev->mdiobus->read = lan78xx_mdiobus_read;
2083 	dev->mdiobus->write = lan78xx_mdiobus_write;
2084 	dev->mdiobus->name = "lan78xx-mdiobus";
2085 	dev->mdiobus->parent = &dev->udev->dev;
2086 
2087 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2088 		 dev->udev->bus->busnum, dev->udev->devnum);
2089 
2090 	switch (dev->chipid) {
2091 	case ID_REV_CHIP_ID_7800_:
2092 	case ID_REV_CHIP_ID_7850_:
2093 		/* set to internal PHY id */
2094 		dev->mdiobus->phy_mask = ~(1 << 1);
2095 		break;
2096 	case ID_REV_CHIP_ID_7801_:
2097 		break;
2098 	}
2099 
2100 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2101 	ret = of_mdiobus_register(dev->mdiobus, node);
2102 	of_node_put(node);
2103 	if (ret) {
2104 		netdev_err(dev->net, "can't register MDIO bus\n");
2105 		goto exit1;
2106 	}
2107 
2108 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2109 	return 0;
2110 exit1:
2111 	mdiobus_free(dev->mdiobus);
2112 	return ret;
2113 }
2114 
/* Tear down the MDIO bus created by lan78xx_mdio_init(); unregister
 * must precede the free.
 */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2120 
/* irq_domain .map callback: bind a virtual IRQ to the lan78xx irqchip
 * and its handler, and keep it out of autoprobing.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
2132 
/* irq_domain .unmap callback: detach chip, handler and chip data from
 * the virtual IRQ (reverse of irq_map()).
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2138 
/* Mapping operations for the chip-internal interrupt domain */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2143 
/* Clear the bit in the cached enable mask only; the hardware register is
 * written later from lan78xx_irq_bus_sync_unlock(), which may sleep.
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
2150 
/* Set the bit in the cached enable mask only; the hardware register is
 * written later from lan78xx_irq_bus_sync_unlock(), which may sleep.
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
2157 
/* Taken around mask/unmask updates; released (with hardware sync) in
 * lan78xx_irq_bus_sync_unlock().
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2164 
/* Flush the cached interrupt enable mask to INT_EP_CTL and drop the bus
 * lock taken in lan78xx_irq_bus_lock().
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		goto irq_bus_sync_unlock;

	/* only touch the hardware if the mask actually changed */
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

irq_bus_sync_unlock:
	if (ret < 0)
		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
			   ERR_PTR(ret));

	mutex_unlock(&data->irq_lock);
}
2190 
/* Slow-bus irqchip: mask/unmask only update a cached value, which is
 * synced to the device over USB in the bus_sync_unlock callback.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2198 
/* Create the IRQ domain for the chip's internal interrupt sources and map
 * the PHY interrupt (INT_EP_PHY). On success dev->domain_data.phyirq holds
 * the Linux IRQ number for the PHY; on failure irqdomain is NULL and
 * phyirq is 0. The current INT_EP_CTL value seeds the cached enable mask.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached mask from the current hardware state */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		return ret;

	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
					     &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2237 
/* Undo lan78xx_setup_irq_domain(): dispose the PHY IRQ mapping before
 * removing the domain, then clear the bookkeeping.
 */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
2249 
lan78xx_mac_config(struct phylink_config * config,unsigned int mode,const struct phylink_link_state * state)2250 static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode,
2251 			       const struct phylink_link_state *state)
2252 {
2253 	struct net_device *net = to_net_dev(config->dev);
2254 	struct lan78xx_net *dev = netdev_priv(net);
2255 	u32 mac_cr = 0;
2256 	int ret;
2257 
2258 	/* Check if the mode is supported */
2259 	if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) {
2260 		netdev_err(net, "Unsupported negotiation mode: %u\n", mode);
2261 		return;
2262 	}
2263 
2264 	switch (state->interface) {
2265 	case PHY_INTERFACE_MODE_GMII:
2266 		mac_cr |= MAC_CR_GMII_EN_;
2267 		break;
2268 	case PHY_INTERFACE_MODE_RGMII:
2269 	case PHY_INTERFACE_MODE_RGMII_ID:
2270 	case PHY_INTERFACE_MODE_RGMII_TXID:
2271 	case PHY_INTERFACE_MODE_RGMII_RXID:
2272 		break;
2273 	default:
2274 		netdev_warn(net, "Unsupported interface mode: %d\n",
2275 			    state->interface);
2276 		return;
2277 	}
2278 
2279 	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr);
2280 	if (ret < 0)
2281 		netdev_err(net, "Failed to config MAC with error %pe\n",
2282 			   ERR_PTR(ret));
2283 }
2284 
/* phylink .mac_link_down callback: quiesce the MAC when the link drops.
 * Stops the netdev queue, disables the TX then RX data paths, and resets
 * the MAC. The ordering matters: both paths must be down before the
 * link_up sequence may run again.
 */
static void lan78xx_mac_link_down(struct phylink_config *config,
				  unsigned int mode, phy_interface_t interface)
{
	struct net_device *net = to_net_dev(config->dev);
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_stop_queue(net);

	/* MAC reset will not de-assert TXEN/RXEN, we need to stop them
	 * manually before reset. TX and RX should be disabled before running
	 * link_up sequence.
	 */
	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		goto link_down_fail;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		goto link_down_fail;

	/* MAC reset seems to not affect MAC configuration, no idea if it is
	 * really needed, but it was done in previous driver version. So, leave
	 * it here.
	 */
	ret = lan78xx_mac_reset(dev);
	if (ret < 0)
		goto link_down_fail;

	return;

link_down_fail:
	netdev_err(dev->net, "Failed to set MAC down with error %pe\n",
		   ERR_PTR(ret));
}
2320 
2321 /**
2322  * lan78xx_configure_usb - Configure USB link power settings
2323  * @dev: pointer to the LAN78xx device structure
2324  * @speed: negotiated Ethernet link speed (in Mbps)
2325  *
2326  * This function configures U1/U2 link power management for SuperSpeed
2327  * USB devices based on the current Ethernet link speed. It uses the
2328  * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2329  *
2330  * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2331  *       LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2332  *
2333  * Return: 0 on success or a negative error code on failure.
2334  */
lan78xx_configure_usb(struct lan78xx_net * dev,int speed)2335 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2336 {
2337 	u32 mask, val;
2338 	int ret;
2339 
2340 	/* Only configure USB settings for SuperSpeed devices */
2341 	if (dev->udev->speed != USB_SPEED_SUPER)
2342 		return 0;
2343 
2344 	/* LAN7850 does not support USB 3.x */
2345 	if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2346 		netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2347 		return 0;
2348 	}
2349 
2350 	switch (speed) {
2351 	case SPEED_1000:
2352 		/* Disable U2, enable U1 */
2353 		ret = lan78xx_update_reg(dev, USB_CFG1,
2354 					 USB_CFG1_DEV_U2_INIT_EN_, 0);
2355 		if (ret < 0)
2356 			return ret;
2357 
2358 		return lan78xx_update_reg(dev, USB_CFG1,
2359 					  USB_CFG1_DEV_U1_INIT_EN_,
2360 					  USB_CFG1_DEV_U1_INIT_EN_);
2361 
2362 	case SPEED_100:
2363 	case SPEED_10:
2364 		/* Enable both U1 and U2 */
2365 		mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2366 		val = mask;
2367 		return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2368 
2369 	default:
2370 		netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2371 		return -EINVAL;
2372 	}
2373 }
2374 
2375 /**
2376  * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2377  * @dev: pointer to the LAN78xx device structure
2378  * @tx_pause: enable transmission of pause frames
2379  * @rx_pause: enable reception of pause frames
2380  *
2381  * This function configures the LAN78xx flow control settings by writing
2382  * to the FLOW and FCT_FLOW registers. The pause time is set to the
2383  * maximum allowed value (65535 quanta). FIFO thresholds are selected
2384  * based on USB speed.
2385  *
2386  * The Pause Time field is measured in units of 512-bit times (quanta):
2387  *   - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2388  *   - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2389  *   - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2390  *
2391  * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2392  *   - RXUSED is the number of bytes used in the RX FIFO
2393  *   - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2394  *   - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2395  *   - Both thresholds are encoded in units of 512 bytes (rounded up)
2396  *
2397  * Thresholds differ by USB speed because available USB bandwidth
2398  * affects how fast packets can be drained from the RX FIFO:
2399  *   - USB 3.x (SuperSpeed):
2400  *       FLOW_ON  = 9216 bytes → 18 units
2401  *       FLOW_OFF = 4096 bytes →  8 units
2402  *   - USB 2.0 (High-Speed):
2403  *       FLOW_ON  = 8704 bytes → 17 units
2404  *       FLOW_OFF = 1024 bytes →  2 units
2405  *
2406  * Note: The FCT_FLOW register must be configured before enabling TX pause
2407  *       (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2408  *
2409  * Return: 0 on success or a negative error code on failure.
2410  */
lan78xx_configure_flowcontrol(struct lan78xx_net * dev,bool tx_pause,bool rx_pause)2411 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2412 					 bool tx_pause, bool rx_pause)
2413 {
2414 	/* Use maximum pause time: 65535 quanta (512-bit times) */
2415 	const u32 pause_time_quanta = 65535;
2416 	u32 fct_flow = 0;
2417 	u32 flow = 0;
2418 	int ret;
2419 
2420 	/* Prepare MAC flow control bits */
2421 	if (tx_pause)
2422 		flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2423 
2424 	if (rx_pause)
2425 		flow |= FLOW_CR_RX_FCEN_;
2426 
2427 	/* Select RX FIFO thresholds based on USB speed
2428 	 *
2429 	 * FCT_FLOW layout:
2430 	 *   bits [6:0]   FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2431 	 *   bits [14:8]  FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2432 	 *   thresholds are expressed in units of 512 bytes
2433 	 */
2434 	switch (dev->udev->speed) {
2435 	case USB_SPEED_SUPER:
2436 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2437 		break;
2438 	case USB_SPEED_HIGH:
2439 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2440 		break;
2441 	default:
2442 		netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2443 			    dev->udev->speed);
2444 		return -EINVAL;
2445 	}
2446 
2447 	/* Step 1: Write FIFO thresholds before enabling pause frames */
2448 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2449 	if (ret < 0)
2450 		return ret;
2451 
2452 	/* Step 2: Enable MAC pause functionality */
2453 	return lan78xx_write_reg(dev, FLOW, flow);
2454 }
2455 
/* phylink .mac_link_up callback: bring the MAC up for a freshly
 * negotiated link. Programs speed/duplex (clearing EEE, which phylink
 * re-enables separately via mac_enable_tx_lpi), then flow control and
 * USB power settings, resubmits RX URBs, flushes both FIFOs and finally
 * enables the TX/RX paths and the netdev queue. The sequence order is
 * required by hardware; any failure aborts and is logged.
 */
static void lan78xx_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *net = to_net_dev(config->dev);
	struct lan78xx_net *dev = netdev_priv(net);
	u32 mac_cr = 0;
	int ret;

	switch (speed) {
	case SPEED_1000:
		mac_cr |= MAC_CR_SPEED_1000_;
		break;
	case SPEED_100:
		mac_cr |= MAC_CR_SPEED_100_;
		break;
	case SPEED_10:
		mac_cr |= MAC_CR_SPEED_10_;
		break;
	default:
		netdev_err(dev->net, "Unsupported speed %d\n", speed);
		return;
	}

	if (duplex == DUPLEX_FULL)
		mac_cr |= MAC_CR_FULL_DUPLEX_;

	/* make sure TXEN and RXEN are disabled before reconfiguring MAC */
	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ |
				 MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr);
	if (ret < 0)
		goto link_up_fail;

	ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause);
	if (ret < 0)
		goto link_up_fail;

	ret = lan78xx_configure_usb(dev, speed);
	if (ret < 0)
		goto link_up_fail;

	/* re-arm RX URBs before data starts flowing */
	lan78xx_rx_urb_submit_all(dev);

	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto link_up_fail;

	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto link_up_fail;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto link_up_fail;

	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto link_up_fail;

	netif_start_queue(net);

	return;

link_up_fail:
	netdev_err(dev->net, "Failed to set MAC up with error %pe\n",
		   ERR_PTR(ret));
}
2525 
2526 /**
2527  * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support
2528  * @dev: LAN78xx device
2529  * @enable: true to enable EEE, false to disable
2530  *
2531  * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy
2532  * Efficient Ethernet (EEE) operation. According to current understanding
2533  * of the LAN7800 documentation, this bit can be modified while TX and RX
2534  * are enabled. No explicit requirement was found to disable data paths
2535  * before changing this bit.
2536  *
2537  * Return: 0 on success or a negative error code
2538  */
lan78xx_mac_eee_enable(struct lan78xx_net * dev,bool enable)2539 static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable)
2540 {
2541 	u32 mac_cr = 0;
2542 
2543 	if (enable)
2544 		mac_cr |= MAC_CR_EEE_EN_;
2545 
2546 	return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr);
2547 }
2548 
lan78xx_mac_disable_tx_lpi(struct phylink_config * config)2549 static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config)
2550 {
2551 	struct net_device *net = to_net_dev(config->dev);
2552 	struct lan78xx_net *dev = netdev_priv(net);
2553 
2554 	lan78xx_mac_eee_enable(dev, false);
2555 }
2556 
/* phylink .mac_enable_tx_lpi callback: program the TX LPI request delay
 * and then turn MAC-side EEE on. Returns 0 or a negative error code.
 */
static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				     bool tx_clk_stop)
{
	struct net_device *net = to_net_dev(config->dev);
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	/* Software should only change this field when Energy Efficient
	 * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
	 * EEEEN during probe, and phylink itself guarantees that
	 * mac_disable_tx_lpi() will have been previously called.
	 */
	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer);
	if (ret < 0)
		return ret;

	return lan78xx_mac_eee_enable(dev, true);
}
2575 
/* MAC operations handed to phylink_create() */
static const struct phylink_mac_ops lan78xx_phylink_mac_ops = {
	.mac_config = lan78xx_mac_config,
	.mac_link_down = lan78xx_mac_link_down,
	.mac_link_up = lan78xx_mac_link_up,
	.mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi,
	.mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi,
};
2583 
/**
 * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801
 * @dev: LAN78xx device
 *
 * Use fixed link configuration with 1 Gbps full duplex. This is used in special
 * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface
 * to a switch without a visible PHY.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_set_fixed_link(struct lan78xx_net *dev)
{
	static const struct phylink_link_state state = {
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};

	netdev_info(dev->net,
		    "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n");

	return phylink_set_fixed_link(dev->phylink, &state);
}
2606 
/**
 * lan78xx_get_phy() - Probe or register PHY device and set interface mode
 * @dev: LAN78xx device structure
 *
 * This function attempts to find a PHY on the MDIO bus. If no PHY is found
 * and the chip is LAN7801, it returns NULL so the caller can fall back to a
 * fixed link. It also sets dev->interface based on chip ID and detected
 * PHY type.
 *
 * Return: a valid PHY device pointer, NULL (LAN7801 fixed-link fallback),
 * or ERR_PTR() on failure.
 */
static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
{
	struct phy_device *phydev;

	/* Attempt to locate a PHY on the MDIO bus */
	phydev = phy_find_first(dev->mdiobus);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		if (phydev) {
			/* External RGMII PHY detected */
			dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
			phydev->is_internal = false;

			if (!phydev->drv)
				netdev_warn(dev->net,
					    "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");

			return phydev;
		}

		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */
		return NULL;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		if (!phydev)
			return ERR_PTR(-ENODEV);

		/* These use internal GMII-connected PHY */
		dev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->is_internal = true;
		return phydev;

	default:
		netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
		return ERR_PTR(-ENODEV);
	}
}
2657 
/**
 * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
 * @dev: LAN78xx device
 *
 * Configure MAC-side registers according to dev->interface, which should be
 * set by lan78xx_get_phy().
 *
 * - For PHY_INTERFACE_MODE_RGMII:
 *   Enable MAC-side TXC delay. This mode seems to be used in a special setup
 *   without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
 *   connected to the KSZ9897 switch, and the link timing is expected to be
 *   hardwired (e.g. via strapping or board layout). No devicetree support is
 *   assumed here.
 *
 * - For PHY_INTERFACE_MODE_RGMII_ID:
 *   Disable MAC-side delay and rely on the PHY driver to provide delay.
 *
 * - For GMII, no MAC-specific config is needed.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
{
	int ret;

	switch (dev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* Enable MAC-side TX clock delay */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		if (ret < 0)
			return ret;

		/* NOTE(review): 0x3D00 is an opaque DLL bypass setting
		 * inherited from earlier driver versions; meaning not
		 * documented here — confirm against the LAN7801 datasheet.
		 */
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		if (ret < 0)
			return ret;

		/* enable the 125 MHz and 25 MHz reference clock outputs */
		ret = lan78xx_update_reg(dev, HW_CFG,
					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
		if (ret < 0)
			return ret;

		break;

	case PHY_INTERFACE_MODE_RGMII_ID:
		/* Disable MAC-side TXC delay, PHY provides it */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
		if (ret < 0)
			return ret;

		break;

	case PHY_INTERFACE_MODE_GMII:
		/* No MAC-specific configuration required */
		break;

	default:
		netdev_warn(dev->net, "Unsupported interface mode: %d\n",
			    dev->interface);
		break;
	}

	return 0;
}
2723 
2724 /**
2725  * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2726  * @dev: LAN78xx device
2727  * @phydev: PHY device (must be valid)
2728  *
2729  * Reads "microchip,led-modes" property from the PHY's DT node and enables
2730  * the corresponding number of LEDs by writing to HW_CFG.
2731  *
2732  * This helper preserves the original logic, enabling up to 4 LEDs.
2733  * If the property is not present, this function does nothing.
2734  *
2735  * Return: 0 on success or a negative error code.
2736  */
lan78xx_configure_leds_from_dt(struct lan78xx_net * dev,struct phy_device * phydev)2737 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2738 					  struct phy_device *phydev)
2739 {
2740 	struct device_node *np = phydev->mdio.dev.of_node;
2741 	u32 reg;
2742 	int len, ret;
2743 
2744 	if (!np)
2745 		return 0;
2746 
2747 	len = of_property_count_elems_of_size(np, "microchip,led-modes",
2748 					      sizeof(u32));
2749 	if (len < 0)
2750 		return 0;
2751 
2752 	ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2753 	if (ret < 0)
2754 		return ret;
2755 
2756 	reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2757 		 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2758 
2759 	reg |= (len > 0) * HW_CFG_LED0_EN_ |
2760 	       (len > 1) * HW_CFG_LED1_EN_ |
2761 	       (len > 2) * HW_CFG_LED2_EN_ |
2762 	       (len > 3) * HW_CFG_LED3_EN_;
2763 
2764 	return lan78xx_write_reg(dev, HW_CFG, reg);
2765 }
2766 
/* Populate dev->phylink_config (capabilities, EEE/LPI defaults, supported
 * interfaces by chip) and create the phylink instance. Returns 0 on
 * success or a negative error code.
 */
static int lan78xx_phylink_setup(struct lan78xx_net *dev)
{
	struct phylink_config *pc = &dev->phylink_config;
	struct phylink *phylink;

	pc->dev = &dev->net->dev;
	pc->type = PHYLINK_NETDEV;
	pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 |
			       MAC_100 | MAC_1000FD;
	pc->mac_managed_pm = true;
	pc->lpi_capabilities = MAC_100FD | MAC_1000FD;
	/*
	 * Default TX LPI (Low Power Idle) request delay count is set to 50us.
	 *
	 * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204.
	 *
	 * Reasoning:
	 * According to the application note in the LAN7800 documentation, a
	 * zero delay may negatively impact the TX data path’s ability to
	 * support Gigabit operation. A value of 50us is recommended as a
	 * reasonable default when the part operates at Gigabit speeds,
	 * balancing stability and power efficiency in EEE mode. This delay can
	 * be increased based on performance testing, as EEE is designed for
	 * scenarios with mostly idle links and occasional bursts of full
	 * bandwidth transmission. The goal is to ensure reliable Gigabit
	 * performance without overly aggressive power optimization during
	 * inactive periods.
	 */
	pc->lpi_timer_default = 50;
	pc->eee_enabled_default = true;

	/* LAN7801 drives an external RGMII PHY; the others use internal GMII */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		phy_interface_set_rgmii(pc->supported_interfaces);
	else
		__set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces);

	/* LPI is possible on every supported interface */
	memcpy(dev->phylink_config.lpi_interfaces,
	       dev->phylink_config.supported_interfaces,
	       sizeof(dev->phylink_config.lpi_interfaces));

	phylink = phylink_create(pc, dev->net->dev.fwnode,
				 dev->interface, &lan78xx_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	dev->phylink = phylink;

	return 0;
}
2816 
lan78xx_phy_uninit(struct lan78xx_net * dev)2817 static void lan78xx_phy_uninit(struct lan78xx_net *dev)
2818 {
2819 	if (dev->phylink) {
2820 		phylink_disconnect_phy(dev->phylink);
2821 		phylink_destroy(dev->phylink);
2822 		dev->phylink = NULL;
2823 	}
2824 }
2825 
/* Locate the PHY, create phylink, prepare the MAC-side interface and
 * either attach the PHY or (LAN7801 without a PHY) fall back to a fixed
 * link. On any failure after phylink creation, the phylink instance is
 * destroyed again. Returns 0 on success or a negative error code.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	struct phy_device *phydev;
	int ret;

	phydev = lan78xx_get_phy(dev);
	/* phydev can be NULL if no PHY is found and the chip is LAN7801,
	 * which will use a fixed link later.
	 * If an  error occurs, return the error code immediately.
	 */
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	ret = lan78xx_phylink_setup(dev);
	if (ret < 0)
		return ret;

	/* dev->interface was set by lan78xx_get_phy() above */
	ret = lan78xx_mac_prepare_for_phy(dev);
	if (ret < 0)
		goto phylink_uninit;

	/* If no PHY is found, set up a fixed link. It is very specific to
	 * the LAN7801 and is used in special cases like EVB-KSZ9897-1 where
	 * LAN7801 acts as a USB-to-Ethernet interface to a switch without
	 * a visible PHY.
	 */
	if (!phydev) {
		ret = lan78xx_set_fixed_link(dev);
		if (ret < 0)
			goto phylink_uninit;

		/* No PHY found, so set up a fixed link and return early.
		 * No need to configure PHY IRQ or attach to phylink.
		 */
		return 0;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	ret = phylink_connect_phy(dev->phylink, phydev);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s, error %pe\n",
			   dev->mdiobus->id, ERR_PTR(ret));
		goto phylink_uninit;
	}

	ret = lan78xx_configure_leds_from_dt(dev, phydev);
	if (ret < 0)
		goto phylink_uninit;

	return 0;

phylink_uninit:
	lan78xx_phy_uninit(dev);

	return ret;
}
2888 
/* Program the MAC's maximum RX frame length (@size excludes FCS; 4 bytes
 * are added for it). If the receiver is currently enabled it is turned
 * off around the update and re-enabled afterwards, since the size field
 * must not change while RX is active. Returns 0 or a negative error.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	bool rxenabled;
	u32 buf;
	int ret;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	if (ret < 0)
		return ret;

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		/* quiesce the receiver before touching the size field */
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
		if (ret < 0)
			return ret;
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);
	if (ret < 0)
		return ret;

	if (rxenabled) {
		/* restore the receiver to its previous enabled state */
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
		if (ret < 0)
			return ret;
	}

	return 0;
}
2925 
/* Asynchronously unlink every URB queued on @q that is not already being
 * unlinked. The queue lock must be dropped around usb_unlink_urb() (the
 * completion handler takes it), so the walk restarts from the head after
 * each unlink. Returns the number of URBs successfully unlinked.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked as being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2970 
lan78xx_change_mtu(struct net_device * netdev,int new_mtu)2971 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2972 {
2973 	struct lan78xx_net *dev = netdev_priv(netdev);
2974 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2975 	int ret;
2976 
2977 	/* no second zero-length packet read wanted after mtu-sized packets */
2978 	if ((max_frame_len % dev->maxpacket) == 0)
2979 		return -EDOM;
2980 
2981 	ret = usb_autopm_get_interface(dev->intf);
2982 	if (ret < 0)
2983 		return ret;
2984 
2985 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2986 	if (ret < 0)
2987 		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2988 			   new_mtu, netdev->mtu, ERR_PTR(ret));
2989 	else
2990 		WRITE_ONCE(netdev->mtu, new_mtu);
2991 
2992 	usb_autopm_put_interface(dev->intf);
2993 
2994 	return ret;
2995 }
2996 
/* ndo_set_mac_address: validate and program a new MAC address into the
 * RX address registers and perfect-filter slot 0. Only allowed while the
 * interface is down. The 6-byte address is packed little-endian into the
 * lo (bytes 0-3) and hi (bytes 4-5) register halves.
 */
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	if (ret < 0)
		return ret;

	/* Added to support MAC address changes */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	if (ret < 0)
		return ret;

	/* mark perfect-filter slot 0 valid so the new address is accepted */
	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
}
3034 
3035 /* Enable or disable Rx checksum offload engine */
lan78xx_set_features(struct net_device * netdev,netdev_features_t features)3036 static int lan78xx_set_features(struct net_device *netdev,
3037 				netdev_features_t features)
3038 {
3039 	struct lan78xx_net *dev = netdev_priv(netdev);
3040 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3041 	unsigned long flags;
3042 
3043 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3044 
3045 	if (features & NETIF_F_RXCSUM) {
3046 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3047 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3048 	} else {
3049 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3050 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3051 	}
3052 
3053 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
3054 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3055 	else
3056 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3057 
3058 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3059 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3060 	else
3061 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3062 
3063 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3064 
3065 	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3066 }
3067 
lan78xx_deferred_vlan_write(struct work_struct * param)3068 static void lan78xx_deferred_vlan_write(struct work_struct *param)
3069 {
3070 	struct lan78xx_priv *pdata =
3071 			container_of(param, struct lan78xx_priv, set_vlan);
3072 	struct lan78xx_net *dev = pdata->dev;
3073 
3074 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
3075 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
3076 }
3077 
/* ndo_vlan_rx_add_vid: set the bit for @vid in the cached VLAN table
 * and defer the actual register write to a sleepable worker.
 */
static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 dword = (vid >> 5) & 0x7F;	/* 32 VIDs per table entry */
	u16 bit = vid & 0x1F;

	pdata->vlan_table[dword] |= 1 << bit;

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
3096 
/* ndo_vlan_rx_kill_vid: clear the bit for @vid in the cached VLAN
 * table and defer the actual register write to a sleepable worker.
 */
static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 dword = (vid >> 5) & 0x7F;	/* 32 VIDs per table entry */
	u16 bit = vid & 0x1F;

	pdata->vlan_table[dword] &= ~(1 << bit);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
3115 
/* Program the six USB Latency Tolerance Messaging (LTM) registers.
 * When LTM is enabled in USB_CFG1 and a valid 24-byte LTM block is
 * found in EEPROM (or, failing that, OTP), those values are used;
 * otherwise all-zero values are written.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_init_ltm(struct lan78xx_net *dev)
{
	u32 regs[6] = { 0 };	/* defaults: all LTM registers zeroed */
	int ret;
	u32 buf;

	/* LAN7850 is USB 2.0 and does not support LTM */
	if (dev->chipid == ID_REV_CHIP_ID_7850_)
		return 0;

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (ret < 0)
		goto init_ltm_failed;

	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		/* temp[0] holds the block length (must be 24 bytes),
		 * temp[1] the word offset of the block
		 */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return ret;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return ret;
			}
		}
	}

	/* write the (possibly EEPROM/OTP-supplied) values to hardware */
	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	if (ret < 0)
		goto init_ltm_failed;

	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
	if (ret < 0)
		goto init_ltm_failed;

	return 0;

init_ltm_failed:
	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
	return ret;
}
3184 
lan78xx_urb_config_init(struct lan78xx_net * dev)3185 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3186 {
3187 	int result = 0;
3188 
3189 	switch (dev->udev->speed) {
3190 	case USB_SPEED_SUPER:
3191 		dev->rx_urb_size = RX_SS_URB_SIZE;
3192 		dev->tx_urb_size = TX_SS_URB_SIZE;
3193 		dev->n_rx_urbs = RX_SS_URB_NUM;
3194 		dev->n_tx_urbs = TX_SS_URB_NUM;
3195 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
3196 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3197 		break;
3198 	case USB_SPEED_HIGH:
3199 		dev->rx_urb_size = RX_HS_URB_SIZE;
3200 		dev->tx_urb_size = TX_HS_URB_SIZE;
3201 		dev->n_rx_urbs = RX_HS_URB_NUM;
3202 		dev->n_tx_urbs = TX_HS_URB_NUM;
3203 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
3204 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3205 		break;
3206 	case USB_SPEED_FULL:
3207 		dev->rx_urb_size = RX_FS_URB_SIZE;
3208 		dev->tx_urb_size = TX_FS_URB_SIZE;
3209 		dev->n_rx_urbs = RX_FS_URB_NUM;
3210 		dev->n_tx_urbs = TX_FS_URB_NUM;
3211 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
3212 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3213 		break;
3214 	default:
3215 		netdev_warn(dev->net, "USB bus speed not supported\n");
3216 		result = -EIO;
3217 		break;
3218 	}
3219 
3220 	return result;
3221 }
3222 
/* Full hardware (re)initialisation: Lite reset, MAC address, USB and
 * FIFO configuration, receive filtering, PHY reset and maximum frame
 * length.  The ordering of the register writes below follows the
 * chip's bring-up sequence and must not be rearranged.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;

	/* issue a Lite reset and poll (up to ~1s) for the self-clearing bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	ret = lan78xx_init_mac_address(dev);
	if (ret < 0)
		return ret;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	ret = lan78xx_init_ltm(dev);
	if (ret < 0)
		return ret;

	/* burst cap and bulk-in delay were chosen per USB speed earlier */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	/* multiple ethernet frames per USB transfer plus clock outputs */
	buf |= HW_CFG_MEF_;
	buf |= HW_CFG_CLK125_EN_;
	buf |= HW_CFG_REFCLK25_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes (register unit is 512-byte blocks, minus one) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* clear stale interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* poll (up to ~1s) for the PHY reset to clear and READY to assert */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* start with auto-negotiated speed/duplex and EEE disabled */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_);

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3400 
lan78xx_init_stats(struct lan78xx_net * dev)3401 static void lan78xx_init_stats(struct lan78xx_net *dev)
3402 {
3403 	u32 *p;
3404 	int i;
3405 
3406 	/* initialize for stats update
3407 	 * some counters are 20bits and some are 32bits
3408 	 */
3409 	p = (u32 *)&dev->stats.rollover_max;
3410 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3411 		p[i] = 0xFFFFF;
3412 
3413 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3414 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3415 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3416 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3417 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3418 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3419 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3420 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3421 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3422 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3423 
3424 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3425 }
3426 
/* ndo_open handler: wake the device, enable NAPI, start the interrupt
 * URB used for link-change notification and start phylink.
 *
 * On success the autopm reference taken here is kept until
 * lan78xx_stop() releases it; on failure it is dropped before return.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	lan78xx_init_stats(dev);

	napi_enable(&dev->napi);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	phylink_start(dev->phylink);

done:
	mutex_unlock(&dev->dev_mutex);

	/* undo the autopm get on any failure path */
	if (ret < 0)
		usb_autopm_put_interface(dev->intf);

	return ret;
}
3466 
/* Cancel all in-flight Rx/Tx URBs and wait for their completion
 * handlers to drain the rxq/txq queues, then release every buffer
 * still sitting in the done/overflow/pending queues.
 *
 * dev->wait is published so completion paths can wake this waiter.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		/* re-arm before re-checking the queues to avoid missed wakeups */
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3502 
/* ndo_stop handler: stop the stats timer, NAPI, all URBs, phylink and
 * deferred work, then drop the autopm reference taken in open().
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		timer_delete_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	phylink_stop(dev->phylink);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	/* pairs with usb_autopm_get_interface() in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3545 
/* Move @skb from an active URB queue (@list, i.e. rxq or txq) to
 * rxq_done and schedule NAPI when the done queue transitions from
 * empty to non-empty.  Called from URB completion context.
 *
 * Note the lock hand-over: interrupts are disabled by irqsave on
 * list->lock and only restored by irqrestore on rxq_done.lock, so
 * both critical sections run with interrupts off.
 *
 * Returns the SKB's previous lifecycle state.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	/* only the first entry needs to kick NAPI; it drains the rest */
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3569 
/* Tx URB completion handler (interrupt context): account statistics,
 * triage USB errors, return the buffer to the free pool and re-kick
 * NAPI if more Tx data is pending with no URB in flight.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors += entry->num_of_packet;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: let the kevent worker clear it */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		/* transient link-level errors: pause the stack's Tx queue */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	/* async variant: safe to call from completion (atomic) context */
	usb_autopm_put_interface_async(dev->intf);

	skb_unlink(skb, &dev->txq);

	lan78xx_release_tx_buf(dev, skb);

	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
	 */
	if (skb_queue_empty(&dev->txq) &&
	    !skb_queue_empty(&dev->txq_pend))
		napi_schedule(&dev->napi);
}
3623 
lan78xx_queue_skb(struct sk_buff_head * list,struct sk_buff * newsk,enum skb_state state)3624 static void lan78xx_queue_skb(struct sk_buff_head *list,
3625 			      struct sk_buff *newsk, enum skb_state state)
3626 {
3627 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3628 
3629 	__skb_queue_tail(list, newsk);
3630 	entry->state = state;
3631 }
3632 
lan78xx_tx_urb_space(struct lan78xx_net * dev)3633 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3634 {
3635 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3636 }
3637 
lan78xx_tx_pend_data_len(struct lan78xx_net * dev)3638 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3639 {
3640 	return dev->tx_pend_data_len;
3641 }
3642 
lan78xx_tx_pend_skb_add(struct lan78xx_net * dev,struct sk_buff * skb,unsigned int * tx_pend_data_len)3643 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3644 				    struct sk_buff *skb,
3645 				    unsigned int *tx_pend_data_len)
3646 {
3647 	unsigned long flags;
3648 
3649 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3650 
3651 	__skb_queue_tail(&dev->txq_pend, skb);
3652 
3653 	dev->tx_pend_data_len += skb->len;
3654 	*tx_pend_data_len = dev->tx_pend_data_len;
3655 
3656 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3657 }
3658 
lan78xx_tx_pend_skb_head_add(struct lan78xx_net * dev,struct sk_buff * skb,unsigned int * tx_pend_data_len)3659 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3660 					 struct sk_buff *skb,
3661 					 unsigned int *tx_pend_data_len)
3662 {
3663 	unsigned long flags;
3664 
3665 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3666 
3667 	__skb_queue_head(&dev->txq_pend, skb);
3668 
3669 	dev->tx_pend_data_len += skb->len;
3670 	*tx_pend_data_len = dev->tx_pend_data_len;
3671 
3672 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3673 }
3674 
lan78xx_tx_pend_skb_get(struct lan78xx_net * dev,struct sk_buff ** skb,unsigned int * tx_pend_data_len)3675 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3676 				    struct sk_buff **skb,
3677 				    unsigned int *tx_pend_data_len)
3678 {
3679 	unsigned long flags;
3680 
3681 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3682 
3683 	*skb = __skb_dequeue(&dev->txq_pend);
3684 	if (*skb)
3685 		dev->tx_pend_data_len -= (*skb)->len;
3686 	*tx_pend_data_len = dev->tx_pend_data_len;
3687 
3688 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3689 }
3690 
/* ndo_start_xmit handler: queue the SKB on the software pending queue
 * and let NAPI batch it into a Tx URB.  Always consumes the SKB
 * (returns NETDEV_TX_OK).
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* if the device is autosuspended, kick the worker to resume it */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3726 
lan78xx_bind(struct lan78xx_net * dev,struct usb_interface * intf)3727 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3728 {
3729 	struct lan78xx_priv *pdata = NULL;
3730 	int ret;
3731 	int i;
3732 
3733 	dev->data[0] = (unsigned long) kzalloc_obj(*pdata);
3734 
3735 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3736 	if (!pdata) {
3737 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3738 		return -ENOMEM;
3739 	}
3740 
3741 	pdata->dev = dev;
3742 
3743 	spin_lock_init(&pdata->rfe_ctl_lock);
3744 	mutex_init(&pdata->dataport_mutex);
3745 
3746 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3747 
3748 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3749 		pdata->vlan_table[i] = 0;
3750 
3751 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3752 
3753 	dev->net->features = 0;
3754 
3755 	if (DEFAULT_TX_CSUM_ENABLE)
3756 		dev->net->features |= NETIF_F_HW_CSUM;
3757 
3758 	if (DEFAULT_RX_CSUM_ENABLE)
3759 		dev->net->features |= NETIF_F_RXCSUM;
3760 
3761 	if (DEFAULT_TSO_CSUM_ENABLE)
3762 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3763 
3764 	if (DEFAULT_VLAN_RX_OFFLOAD)
3765 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3766 
3767 	if (DEFAULT_VLAN_FILTER_ENABLE)
3768 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3769 
3770 	dev->net->hw_features = dev->net->features;
3771 
3772 	ret = lan78xx_setup_irq_domain(dev);
3773 	if (ret < 0) {
3774 		netdev_warn(dev->net,
3775 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3776 		goto out1;
3777 	}
3778 
3779 	/* Init all registers */
3780 	ret = lan78xx_reset(dev);
3781 	if (ret) {
3782 		netdev_warn(dev->net, "Registers INIT FAILED....");
3783 		goto out2;
3784 	}
3785 
3786 	ret = lan78xx_mdio_init(dev);
3787 	if (ret) {
3788 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3789 		goto out2;
3790 	}
3791 
3792 	dev->net->flags |= IFF_MULTICAST;
3793 
3794 	pdata->wol = WAKE_MAGIC;
3795 
3796 	return ret;
3797 
3798 out2:
3799 	lan78xx_remove_irq_domain(dev);
3800 
3801 out1:
3802 	netdev_warn(dev->net, "Bind routine FAILED");
3803 	cancel_work_sync(&pdata->set_multicast);
3804 	cancel_work_sync(&pdata->set_vlan);
3805 	kfree(pdata);
3806 	return ret;
3807 }
3808 
lan78xx_unbind(struct lan78xx_net * dev,struct usb_interface * intf)3809 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3810 {
3811 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3812 
3813 	lan78xx_remove_irq_domain(dev);
3814 
3815 	lan78xx_remove_mdio(dev);
3816 
3817 	if (pdata) {
3818 		cancel_work_sync(&pdata->set_multicast);
3819 		cancel_work_sync(&pdata->set_vlan);
3820 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3821 		kfree(pdata);
3822 		pdata = NULL;
3823 		dev->data[0] = 0;
3824 	}
3825 }
3826 
/* Apply the hardware Rx checksum result (from the RX command words) to
 * @skb, or fall back to software checksumming when offload is off or
 * the hardware result cannot be trusted.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    unlikely(rx_cmd_a & RX_CMD_A_CSE_MASK_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* hardware checksum lives in the upper half of rx_cmd_b */
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3845 
/* Attach the hardware-stripped VLAN tag to @skb when Rx VLAN stripping
 * is enabled and the frame carried a VLAN header (FVTG flag set).
 */
static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	if (!(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))
		return;

	/* the stripped TCI is in the low 16 bits of rx_cmd_b */
	if (rx_cmd_a & RX_CMD_A_FVTG_)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       (rx_cmd_b & 0xffff));
}
3855 
/* Hand a fully parsed Rx frame to the network stack via GRO and update
 * the interface statistics.  Called from NAPI context.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	/* sets skb->protocol and pulls the ethernet header */
	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* clear driver-private state before the stack takes ownership */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* Rx timestamping may consume the SKB for deferred delivery */
	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}
3872 
/* Parse one received URB buffer, which may contain several ethernet
 * frames, each preceded by three little-endian command words
 * (rx_cmd_a/b/c) and followed by padding to a 4-byte boundary.
 *
 * Each good frame is copied into a fresh NAPI SKB and delivered (or
 * queued to rxq_overflow when the budget is exhausted).
 *
 * Returns 1 when the buffer was parsed to the end, 0 when it was
 * dropped because of a malformed length or allocation failure.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	/* too short to even hold one command-word header */
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		/* pad to the next 4-byte boundary after size + RXW_PADDING */
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* claimed frame length must fit in the remaining buffer */
		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_) &&
		    (rx_cmd_a & RX_CMD_A_RX_HARD_ERRS_MASK_)) {
			/* hardware flagged a receive error: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			/* frame must at least contain the trailing FCS */
			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3959 
rx_process(struct lan78xx_net * dev,struct sk_buff * skb,int budget,int * work_done)3960 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3961 			      int budget, int *work_done)
3962 {
3963 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3964 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3965 		dev->net->stats.rx_errors++;
3966 	}
3967 }
3968 
/* Rx URB completion handler (interrupt context): classify the URB
 * status, update error counters and hand the buffer to defer_bh(),
 * which queues it on rxq_done for NAPI with either rx_done (parse the
 * data) or rx_cleanup (just recycle the buffer).
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	skb_put(skb, urb->actual_length);
	state = rx_done;

	/* sanity check: the URB should be the one stored in the SKB cb */
	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		/* success, but runt transfers are counted and recycled */
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* endpoint stalled: let the kevent worker clear it */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}
4028 
/* Submit one Rx buffer as a bulk-in URB.  The submission happens under
 * rxq.lock so the device-state checks and the queueing of the SKB on
 * rxq are atomic with respect to teardown paths.
 *
 * On any failure the buffer is returned to the free pool and a
 * negative error code is returned.
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, up and not halted/asleep */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: defer recovery to the worker */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* give the buffer back to the free pool on any failure */
	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
4080 
lan78xx_rx_urb_submit_all(struct lan78xx_net * dev)4081 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4082 {
4083 	struct sk_buff *rx_buf;
4084 
4085 	/* Ensure the maximum number of Rx URBs is submitted
4086 	 */
4087 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4088 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4089 			break;
4090 	}
4091 }
4092 
lan78xx_rx_urb_resubmit(struct lan78xx_net * dev,struct sk_buff * rx_buf)4093 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
4094 				    struct sk_buff *rx_buf)
4095 {
4096 	/* reset SKB data pointers */
4097 
4098 	rx_buf->data = rx_buf->head;
4099 	skb_reset_tail_pointer(rx_buf);
4100 	rx_buf->len = 0;
4101 	rx_buf->data_len = 0;
4102 
4103 	rx_submit(dev, rx_buf, GFP_ATOMIC);
4104 }
4105 
/* Write the two little-endian Tx command words for @skb into @buffer.
 *
 * Command word A carries the frame length plus FCS/checksum/LSO/VLAN
 * flags; command word B carries the MSS (for GSO frames) and the VLAN
 * tag (when present).
 */
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
{
	u32 cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
	u32 cmd_b = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	if (skb_is_gso(skb)) {
		/* Hardware requires a minimum MSS value */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		cmd_a |= TX_CMD_A_LSO_;
		cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
	}

	if (skb_vlan_tag_present(skb)) {
		cmd_a |= TX_CMD_A_IVTG_;
		cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	put_unaligned_le32(cmd_a, buffer);
	put_unaligned_le32(cmd_b, buffer + 4);
}
4133 
/* Fill the Tx URB buffer @tx_buf with as many pending SKBs as fit.
 *
 * Each frame is laid out as an aligned [command words][frame data]
 * record; the per-buffer skb_data entry accumulates the byte and
 * packet counts used later for Tx statistics.
 *
 * Returns the skb_data entry of @tx_buf (never NULL).
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there is room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* Pad so each record starts on a TX_ALIGNMENT boundary */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* Doesn't fit: push the SKB back for the next URB */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			/* Copy failed: drop this frame but keep filling
			 * the buffer with the remaining pending SKBs.
			 */
			struct net_device_stats *stats = &dev->net->stats;

			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		/* Count short frames as ETH_ZLEN for the byte statistics */
		entry->length += max_t(unsigned int, len, ETH_ZLEN);
		/* A GSO frame accounts for all of its segments */
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
4199 
/* Tx bottom half: wake the stack queue if space became available, then
 * drain the pending-SKB queue into Tx URBs and submit them, stopping on
 * the first submission error or when no more data/URBs remain.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		/* Take a runtime-PM reference for the in-flight URB; it is
		 * dropped in the completion handler or on submit failure.
		 */
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			/* Endpoint stalled: let the deferred work clear it */
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		/* Failure path: account the dropped packets and recycle the
		 * URB buffer (also reached via "goto out" above).
		 */
		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
4292 
/* NAPI bottom half body: deliver overflow frames from the previous
 * cycle, process completed Rx URBs up to @budget, resubmit Rx URBs and
 * kick the Tx path.
 *
 * Returns the number of frames passed to the stack this cycle.
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* Errored URB: nothing to deliver, just resubmit */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4368 
/* NAPI poll callback: run the bottom half and decide whether another
 * polling cycle is needed.
 *
 * Returns the amount of work done (or @budget when polling continues).
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				/* Pending Tx data but no URB in flight */
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	return result;
}
4412 
/* Deferred-work handler: performs the actions requested via
 * lan78xx_defer_kevent() that need process context — clearing Tx/Rx
 * endpoint halts, acknowledging PHY interrupts and updating statistics.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return;

	/* Wake the device; bail out if that fails */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);

		status = usb_clear_halt(dev->udev, dev->pipe_out);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}

	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			/* Restart Rx URB submission via NAPI */
			napi_schedule(&dev->napi);
		}
	}

	if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
		ret = lan78xx_phy_int_ack(dev);
		if (ret)
			netdev_info(dev->net, "PHY INT ack failed (%pe)\n",
				    ERR_PTR(ret));
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		/* Back off the statistics period exponentially (capped) */
		dev->delta = min((dev->delta * 2), 50);
	}

	usb_autopm_put_interface(dev->intf);
}
4483 
/* Completion handler for the interrupt (status) URB: process the status
 * data on success and resubmit the URB unless the interface is going
 * away.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* Clear stale status data before the next transfer */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4535 
/* USB disconnect callback: tear down phylink, unregister the netdev,
 * stop deferred work, release URBs/buffers and drop the device
 * references taken in probe.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;

	/* phylink calls require the RTNL lock */
	rtnl_lock();
	phylink_stop(dev->phylink);
	phylink_disconnect_phy(dev->phylink);
	rtnl_unlock();

	unregister_netdev(net);

	/* Stop new deferred work from being scheduled, then flush it */
	timer_shutdown_sync(&dev->stat_monitor);
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	phylink_destroy(dev->phylink);

	/* Drop any Tx URBs deferred while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4576 
/* Watchdog Tx timeout handler: unlink all in-flight Tx URBs and let
 * NAPI restart transmission.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}
4584 
/* Per-SKB feature validation: frames larger than the device's TSO limit
 * cannot be segmented by hardware, so GSO is dropped for them; VLAN and
 * VXLAN constraints are then applied on top.
 */
static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
						struct net_device *netdev,
						netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	if (skb->len > LAN78XX_TSO_SIZE(dev))
		features &= ~NETIF_F_GSO_MASK;

	return vxlan_features_check(skb, vlan_features_check(skb, features));
}
4599 
/* Network-stack callbacks for the LAN78xx netdev */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4615 
/* Statistics timer callback: defer the actual register reads to the
 * work queue, since USB I/O cannot be done from timer context.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4622 
/* USB probe: allocate the netdev and driver state, validate the three
 * expected endpoints (bulk in, bulk out, interrupt in), bind to the
 * hardware, set up the interrupt URB and PHY, and register the netdev.
 *
 * Returns 0 on success or a negative errno; error paths unwind in
 * reverse order of setup.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	/* Hold a reference for the lifetime of the netdev */
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->mdiobus_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* The device must expose bulk in/out plus an interrupt endpoint */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out5;
	}

	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_urbs;
	}

	usb_fill_int_urb(dev->urb_intr, dev->udev,
			 dev->pipe_intr, buf, maxp,
			 intr_complete, dev, period);
	/* The USB core frees buf together with the URB */
	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto free_urbs;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto free_urbs;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto phy_uninit;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

phy_uninit:
	lan78xx_phy_uninit(dev);
free_urbs:
	usb_free_urb(dev->urb_intr);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4796 
lan78xx_wakeframe_crc16(const u8 * buf,int len)4797 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4798 {
4799 	const u16 crc16poly = 0x8005;
4800 	int i;
4801 	u16 bit, crc, msb;
4802 	u8 data;
4803 
4804 	crc = 0xFFFF;
4805 	for (i = 0; i < len; i++) {
4806 		data = *buf++;
4807 		for (bit = 0; bit < 8; bit++) {
4808 			msb = crc >> 15;
4809 			crc <<= 1;
4810 
4811 			if (msb ^ (u16)(data & 1)) {
4812 				crc ^= crc16poly;
4813 				crc |= (u16)0x0001U;
4814 			}
4815 			data >>= 1;
4816 		}
4817 	}
4818 
4819 	return crc;
4820 }
4821 
/* Configure the device for USB autosuspend (selective suspend): stop
 * the data paths, clear stale wake status, enable good-frame and PHY
 * wakeup, enter suspend mode 3 and restart Rx so wake frames can be
 * received.
 *
 * Returns 0 on success or a negative errno from register access.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	/* Clear all latched wake-source status bits */
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* Clear (write-one-to-clear) any pending wake-up status */
	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Rx must run while suspended so wake frames are detected */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4889 
/* Program the device for system suspend according to the Wake-on-LAN
 * mask @wol (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP): stop the data
 * paths, set up the wakeup frame filters and PMT_CTL suspend mode, then
 * restart Rx so wake events can be detected.
 *
 * Returns 0 on success or a negative errno from register access.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* Filter prefixes matched by the wakeup-frame CRC16 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	/* Clear all latched wake-source status bits */
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* Disable all wakeup frame filters before re-programming them */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* Mask 0x7: match the first 3 bytes of the destination MAC */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* Mask 0x3: match the first 2 bytes of the destination MAC */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* Mask 0x3000: match bytes 12-13 (the EtherType field) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Rx must run while suspended so wake frames are detected */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
5086 
/* USB suspend callback: refuse autosuspend while Tx is in flight, quiesce
 * the data paths and PHY, then arm either autosuspend (good-frame wake)
 * or Wake-on-LAN depending on the PM message; when the interface is
 * down, simply disable all wake sources and enter suspend mode 3.
 *
 * Returns 0 on success, -EBUSY to veto autosuspend, or a negative errno.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* Mark asleep while holding the lock so no new
			 * URB submissions race with the transition.
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		rtnl_lock();
		phylink_suspend(dev->phylink, false);
		rtnl_unlock();

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		timer_delete(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			/* Arm WoL with the user-configured wake mask */
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* Clear (write-one-to-clear) any pending wake-up status */
		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5200 
lan78xx_submit_deferred_urbs(struct lan78xx_net * dev)5201 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
5202 {
5203 	bool pipe_halted = false;
5204 	struct urb *urb;
5205 
5206 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5207 		struct sk_buff *skb = urb->context;
5208 		int ret;
5209 
5210 		if (!netif_device_present(dev->net) ||
5211 		    !netif_carrier_ok(dev->net) ||
5212 		    pipe_halted) {
5213 			lan78xx_release_tx_buf(dev, skb);
5214 			continue;
5215 		}
5216 
5217 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5218 
5219 		if (ret == 0) {
5220 			netif_trans_update(dev->net);
5221 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5222 		} else {
5223 			if (ret == -EPIPE) {
5224 				netif_stop_queue(dev->net);
5225 				pipe_halted = true;
5226 			} else if (ret == -ENODEV) {
5227 				netif_device_detach(dev->net);
5228 			}
5229 
5230 			lan78xx_release_tx_buf(dev, skb);
5231 		}
5232 	}
5233 
5234 	return pipe_halted;
5235 }
5236 
/* USB resume handler (non-reset path).
 *
 * With the netdev open: flush the Tx FIFO, resubmit the interrupt URB,
 * push out any Tx URBs deferred while asleep, restart the Tx path, kick
 * NAPI and re-arm the statistics timer.  In all cases the ASLEEP flag is
 * cleared and the wake-up control/source registers (WUCSR, WUCSR2,
 * WK_SRC) are reset.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		/* restart the interrupt endpoint; a detach on -ENODEV
		 * mirrors the handling in the Tx submission path
		 */
		if (dev->urb_intr) {
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			/* a stalled bulk-out pipe is recovered from the
			 * kevent worker
			 */
			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		/* clear ASLEEP under txq.lock so it cannot race with Tx */
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		/* restart the Tx queue only if there is room for more data */
		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		napi_schedule(&dev->napi);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	/* disable wake-up sources and clear the latched wake source bits */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	/* bit names suggest these writes clear the per-event "received"
	 * status flags (NOTE(review): presumed write-1-to-clear semantics;
	 * confirm against the LAN78xx datasheet)
	 */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5333 
lan78xx_reset_resume(struct usb_interface * intf)5334 static int lan78xx_reset_resume(struct usb_interface *intf)
5335 {
5336 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5337 	int ret;
5338 
5339 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5340 
5341 	ret = lan78xx_reset(dev);
5342 	if (ret < 0)
5343 		return ret;
5344 
5345 	ret = lan78xx_resume(intf);
5346 	if (ret < 0)
5347 		return ret;
5348 
5349 	rtnl_lock();
5350 	phylink_resume(dev->phylink);
5351 	rtnl_unlock();
5352 
5353 	return 0;
5354 }
5355 
/* USB vendor/product IDs of the devices bound by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	/* sentinel; must remain the last entry */
	{},
};
MODULE_DEVICE_TABLE(usb, products);
5376 
/* USB driver registration: probe/disconnect plus the power-management
 * callbacks defined above
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	/* opt in to USB runtime PM (autosuspend) */
	.supports_autosuspend	= 1,
	/* keep USB3 link power management under driver control; do not
	 * let the hub initiate LPM transitions
	 */
	.disable_hub_initiated_lpm = 1,
};
5388 
/* expands to the module init/exit that (un)registers lan78xx_driver */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
5394