// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES	(5 * HZ)
#define THROTTLE_JIFFIES	(HZ / 8)
#define UNLINK_TIMEOUT_MS	3

#define RX_MAX_QUEUE_MEMORY	(60 * 1518)

#define SS_USB_PKT_SIZE		(1024)
#define HS_USB_PKT_SIZE		(512)
#define FS_USB_PKT_SIZE		(64)

#define MAX_RX_FIFO_SIZE	(12 * 1024)
#define MAX_TX_FIFO_SIZE	(12 * 1024)

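/* The FCT_FLOW register encodes the pause-on/pause-off levels in units
 * of 512 bytes: the "on" threshold occupies bits 6:0 and the "off"
 * threshold bits 14:8. For example, on SuperSpeed links
 * FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS) yields
 * (9216 / 512) | ((4096 / 512) << 8) = 0x12 | 0x0800 = 0x0812.
 */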
#define FLOW_THRESHOLD(n)	((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on) << 0) | \
					 (FLOW_THRESHOLD(off) << 8))

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS	9216
#define FLOW_ON_HS	8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS	4096
#define FLOW_OFF_HS	1024

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD	(true)
#define TX_ALIGNMENT		(4)
#define RXW_PADDING		2

#define LAN78XX_USB_VENDOR_ID	(0x0424)
#define LAN7800_USB_PRODUCT_ID	(0x7800)
#define LAN7850_USB_PRODUCT_ID	(0x7850)
#define LAN7801_USB_PRODUCT_ID	(0x7801)
#define LAN78XX_EEPROM_MAGIC	(0x78A5)
#define LAN78XX_OTP_MAGIC	(0x78F3)
#define AT29M2AF_USB_VENDOR_ID	(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID	(0x0012)

#define MII_READ	1
#define MII_WRITE	0

#define EEPROM_INDICATOR	(0xA5)
#define EEPROM_MAC_OFFSET	(0x01)
#define MAX_EEPROM_SIZE		512
#define OTP_INDICATOR_1		(0xF3)
#define OTP_INDICATOR_2		(0xF7)

#define WAKE_ALL	(WAKE_PHY | WAKE_UCAST | \
			 WAKE_MCAST | WAKE_BCAST | \
			 WAKE_ARP | WAKE_MAGIC)

#define TX_URB_NUM	10
#define TX_SS_URB_NUM	TX_URB_NUM
#define TX_HS_URB_NUM	TX_URB_NUM
#define TX_FS_URB_NUM	TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet */
#define TX_SS_URB_SIZE	(32 * 1024)
#define TX_HS_URB_SIZE	(16 * 1024)
#define TX_FS_URB_SIZE	(10 * 1024)

#define RX_SS_URB_NUM	30
#define RX_HS_URB_NUM	10
#define RX_FS_URB_NUM	10
#define RX_SS_URB_SIZE	TX_SS_URB_SIZE
#define RX_HS_URB_SIZE	TX_HS_URB_SIZE
#define RX_FS_URB_SIZE	TX_FS_URB_SIZE

#define SS_BURST_CAP_SIZE	RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY	0x2000
#define HS_BURST_CAP_SIZE	RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY	0x2000
#define FS_BURST_CAP_SIZE	RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY	0x2000

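/* Every frame exchanged with the chip carries a command header: on
 * transmit, TX_CMD_LEN bytes (two 32-bit command words) are prepended
 * to the Ethernet frame; on receive, RX_CMD_LEN bytes (two 32-bit words
 * plus a 16-bit word) precede each frame in the bulk-in buffer.
 */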
#define TX_CMD_LEN	8
#define TX_SKB_MIN_LEN	(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)	((dev)->tx_urb_size - TX_SKB_MIN_LEN)

#define RX_CMD_LEN	10
#define RX_SKB_MIN_LEN	(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)	((mtu) + ETH_HLEN + VLAN_HLEN)

/* USB related defines */
#define BULK_IN_PIPE	1
#define BULK_OUT_PIPE	2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (msec) */
#define STAT_UPDATE_TIMER	(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT	(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS	1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP		(32)
#define INT_EP_INTEP		(31)
#define INT_EP_OTP_WR_DONE	(28)
#define INT_EP_EEE_TX_LPI_START	(26)
#define INT_EP_EEE_TX_LPI_STOP	(25)
#define INT_EP_EEE_RX_LPI	(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO		(22)
#define INT_EP_TXE		(21)
#define INT_EP_USB_STATUS	(20)
#define INT_EP_TX_DIS		(19)
#define INT_EP_RX_DIS		(18)
#define INT_EP_PHY		(17)
#define INT_EP_DP		(16)
#define INT_EP_MAC_ERR		(15)
#define INT_EP_TDFU		(14)
#define INT_EP_TDFO		(13)
#define INT_EP_UTX		(12)
#define INT_EP_GPIO_11		(11)
#define INT_EP_GPIO_10		(10)
#define INT_EP_GPIO_9		(9)
#define INT_EP_GPIO_8		(8)
#define INT_EP_GPIO_7		(7)
#define INT_EP_GPIO_6		(6)
#define INT_EP_GPIO_5		(5)
#define INT_EP_GPIO_4		(4)
#define INT_EP_GPIO_3		(3)
#define INT_EP_GPIO_2		(2)
#define INT_EP_GPIO_1		(1)
#define INT_EP_GPIO_0		(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Byte Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collisions",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Byte Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data { /* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

#define EVENT_TX_HALT		0
#define EVENT_RX_HALT		1
#define EVENT_RX_MEMORY		2
#define EVENT_STS_SPLIT		3
#define EVENT_LINK_RESET	4
#define EVENT_RX_PAUSED		5
#define EVENT_DEV_WAKING	6
#define EVENT_DEV_ASLEEP	7
#define EVENT_DEV_OPEN		8
#define EVENT_STAT_UPDATE	9
#define EVENT_DEV_DISCONNECT	10

struct statstage {
	struct mutex access_lock; /* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};

struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;
	struct mutex irq_lock; /* for irq bus access */
};

struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	unsigned int tx_pend_data_len;
	size_t n_tx_urbs;
	size_t n_rx_urbs;
	size_t tx_urb_size;
	size_t rx_urb_size;

	struct sk_buff_head rxq_free;
	struct sk_buff_head rxq;
	struct sk_buff_head rxq_done;
	struct sk_buff_head rxq_overflow;
	struct sk_buff_head txq_free;
	struct sk_buff_head txq;
	struct sk_buff_head txq_pend;

	struct napi_struct napi;

	struct delayed_work wq;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex mdiobus_mutex; /* for MDIO bus access */
	unsigned int pipe_in, pipe_out, pipe_intr;

	unsigned int bulk_in_delay;
	unsigned int burst_cap;

	unsigned long flags;

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned int maxpacket;
	struct timer_list stat_monitor;

	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;
	u8 fc_request_control;

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

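/* rxq_free/txq_free are pools of preallocated receive and transmit
 * buffers. Each skb carries its preallocated URB in skb->cb (struct
 * skb_data), so the hot paths never have to allocate.
 */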
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	if (skb_queue_empty(buf_pool))
		return NULL;

	return skb_dequeue(buf_pool);
}

static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}

static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
{
	struct skb_data *entry;
	struct sk_buff *buf;

	while (!skb_queue_empty(buf_pool)) {
		buf = skb_dequeue(buf_pool);
		if (buf) {
			entry = (struct skb_data *)buf->cb;
			usb_free_urb(entry->urb);
			dev_kfree_skb_any(buf);
		}
	}
}

static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}

static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}

static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}

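/* Register access goes over vendor-specific control transfers. The
 * 4-byte transfer buffer is kmalloc'd rather than placed on the stack
 * because usb_control_msg() requires a DMA-able buffer.
 */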
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) && net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	return lan78xx_write_reg(dev, reg, buf);
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

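/* The hardware statistics counters are 32 bits wide. The driver keeps
 * 64-bit totals by detecting wrap-around (a freshly read raw value
 * smaller than the saved one) and accumulating in lan78xx_update_stats():
 * curr = raw + rollovers * (max + 1).
 */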
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}

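/* Disable a MAC or FIFO block: clear its enable bit, then poll the
 * corresponding "disabled" status bit until the block reports that it
 * has actually stopped, or HW_DISABLE_TIMEOUT elapses.
 */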
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	return stopped ? 0 : -ETIMEDOUT;
}

static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}

static int lan78xx_start_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start tx path");

	/* Start the MAC transmitter */

	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
	if (ret < 0)
		return ret;

	/* Start the Tx FIFO */

	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop tx path");

	/* Stop the Tx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
	if (ret < 0)
		return ret;

	/* Stop the MAC transmitter */

	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}

static int lan78xx_start_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start rx path");

	/* Start the Rx FIFO */

	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
	if (ret < 0)
		return ret;

	/* Start the MAC receiver */

	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop rx path");

	/* Stop the MAC receiver */

	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
	if (ret < 0)
		return ret;

	/* Stop the Rx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}

/* Loop until the read completes, with timeout. Called with
 * mdiobus_mutex held.
 */
static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (ret < 0)
			return ret;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -ETIMEDOUT;
}

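/* Compose a MII_ACC register value: PHY address, register index, the
 * read/write direction, and the BUSY bit that starts the transaction.
 */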
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -ETIMEDOUT;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not a USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	int ret;
	u8 sig;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if (ret < 0)
		return ret;

	if (sig != EEPROM_INDICATOR)
		return -ENODATA;

	return lan78xx_read_raw_eeprom(dev, offset, length, data);
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Looks like not a USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Looks like not a USB specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not a USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

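/* OTP access: power the OTP block up if it is down, program the address
 * registers one byte at a time, kick the command via OTP_CMD_GO, and
 * poll OTP_STATUS until the controller goes idle.
 */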
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return ret;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -ETIMEDOUT;
}

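/* The dataport gives indirect access to internal RAM tables (e.g. the
 * VLAN/multicast hash RAM selected through DP_SEL). Writes hold the
 * dataport_mutex and keep the device awake for the whole access.
 */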
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
		container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i, ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
				     DP_SEL_VHF_VLAN_LEN,
				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
	if (ret < 0)
		goto multicast_write_done;

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
		if (ret < 0)
			goto multicast_write_done;
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

multicast_write_done:
	if (ret < 0)
		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
}

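/* ndo_set_rx_mode callback. It runs in atomic context, so it only
 * rebuilds the filter tables under the rfe_ctl spinlock and defers the
 * actual register writes to the set_multicast work item above.
 */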
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
					(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

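/* Program pause-frame handling. With autonegotiation, the TX/RX pause
 * settings are resolved from the local and link-partner advertisements;
 * otherwise the user-requested fc_request_control is applied directly.
 */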
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;
	int ret;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
	if (ret < 0)
		return ret;

	/* threshold value should be set before enabling flow */
	return lan78xx_write_reg(dev, FLOW, flow);
}

static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);

static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}

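/* Handle a PHY link change signalled through the interrupt endpoint
 * (EVENT_LINK_RESET). Link down resets the MAC; link up reconfigures
 * USB LPM (U1/U2) on SuperSpeed, updates flow control from the resolved
 * advertisements, and restarts the receive path.
 */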
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		local_bh_disable();
		napi_schedule(&dev->napi);
		local_bh_enable();
	}

	return 0;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

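/* Parse an interrupt-endpoint completion: the device reports a single
 * 32-bit status word. A PHY interrupt defers link handling to keventd
 * and is forwarded to the PHY's virtual IRQ if one is installed.
 */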
lan78xx_status(struct lan78xx_net * dev,struct urb * urb)1746 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1747 {
1748 u32 intdata;
1749
1750 if (urb->actual_length != 4) {
1751 netdev_warn(dev->net,
1752 "unexpected urb length %d", urb->actual_length);
1753 return;
1754 }
1755
1756 intdata = get_unaligned_le32(urb->transfer_buffer);
1757
1758 if (intdata & INT_ENP_PHY_INT) {
1759 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1760 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1761
1762 if (dev->domain_data.phyirq > 0)
1763 generic_handle_irq_safe(dev->domain_data.phyirq);
1764 } else {
1765 netdev_warn(dev->net,
1766 "unexpected interrupt: 0x%08x\n", intdata);
1767 }
1768 }
1769
lan78xx_ethtool_get_eeprom_len(struct net_device * netdev)1770 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1771 {
1772 return MAX_EEPROM_SIZE;
1773 }
1774
lan78xx_ethtool_get_eeprom(struct net_device * netdev,struct ethtool_eeprom * ee,u8 * data)1775 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1776 struct ethtool_eeprom *ee, u8 *data)
1777 {
1778 struct lan78xx_net *dev = netdev_priv(netdev);
1779 int ret;
1780
1781 ret = usb_autopm_get_interface(dev->intf);
1782 if (ret)
1783 return ret;
1784
1785 ee->magic = LAN78XX_EEPROM_MAGIC;
1786
1787 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1788
1789 usb_autopm_put_interface(dev->intf);
1790
1791 return ret;
1792 }
1793
lan78xx_ethtool_set_eeprom(struct net_device * netdev,struct ethtool_eeprom * ee,u8 * data)1794 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1795 struct ethtool_eeprom *ee, u8 *data)
1796 {
1797 struct lan78xx_net *dev = netdev_priv(netdev);
1798 int ret;
1799
1800 ret = usb_autopm_get_interface(dev->intf);
1801 if (ret)
1802 return ret;
1803
1804 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1805 * to load data from EEPROM
1806 */
1807 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1808 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1809 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1810 (ee->offset == 0) &&
1811 (ee->len == 512) &&
1812 (data[0] == OTP_INDICATOR_1))
1813 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1814
1815 usb_autopm_put_interface(dev->intf);
1816
1817 return ret;
1818 }
1819
lan78xx_get_strings(struct net_device * netdev,u32 stringset,u8 * data)1820 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1821 u8 *data)
1822 {
1823 if (stringset == ETH_SS_STATS)
1824 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1825 }
1826
lan78xx_get_sset_count(struct net_device * netdev,int sset)1827 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1828 {
1829 if (sset == ETH_SS_STATS)
1830 return ARRAY_SIZE(lan78xx_gstrings);
1831 else
1832 return -EOPNOTSUPP;
1833 }
1834
lan78xx_get_stats(struct net_device * netdev,struct ethtool_stats * stats,u64 * data)1835 static void lan78xx_get_stats(struct net_device *netdev,
1836 struct ethtool_stats *stats, u64 *data)
1837 {
1838 struct lan78xx_net *dev = netdev_priv(netdev);
1839
1840 lan78xx_update_stats(dev);
1841
1842 mutex_lock(&dev->stats.access_lock);
1843 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1844 mutex_unlock(&dev->stats.access_lock);
1845 }
1846
lan78xx_get_wol(struct net_device * netdev,struct ethtool_wolinfo * wol)1847 static void lan78xx_get_wol(struct net_device *netdev,
1848 struct ethtool_wolinfo *wol)
1849 {
1850 struct lan78xx_net *dev = netdev_priv(netdev);
1851 int ret;
1852 u32 buf;
1853 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1854
1855 if (usb_autopm_get_interface(dev->intf) < 0)
1856 return;
1857
1858 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1859 if (unlikely(ret < 0)) {
1860 netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
1861 wol->supported = 0;
1862 wol->wolopts = 0;
1863 } else {
1864 if (buf & USB_CFG_RMT_WKP_) {
1865 wol->supported = WAKE_ALL;
1866 wol->wolopts = pdata->wol;
1867 } else {
1868 wol->supported = 0;
1869 wol->wolopts = 0;
1870 }
1871 }
1872
1873 usb_autopm_put_interface(dev->intf);
1874 }
1875
lan78xx_set_wol(struct net_device * netdev,struct ethtool_wolinfo * wol)1876 static int lan78xx_set_wol(struct net_device *netdev,
1877 struct ethtool_wolinfo *wol)
1878 {
1879 struct lan78xx_net *dev = netdev_priv(netdev);
1880 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1881 int ret;
1882
1883 if (wol->wolopts & ~WAKE_ALL)
1884 return -EINVAL;
1885
1886 ret = usb_autopm_get_interface(dev->intf);
1887 if (ret < 0)
1888 return ret;
1889
1890 pdata->wol = wol->wolopts;
1891
1892 ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1893 if (ret < 0)
1894 goto exit_pm_put;
1895
1896 ret = phy_ethtool_set_wol(netdev->phydev, wol);
1897
1898 exit_pm_put:
1899 usb_autopm_put_interface(dev->intf);
1900
1901 return ret;
1902 }
1903
lan78xx_get_eee(struct net_device * net,struct ethtool_keee * edata)1904 static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
1905 {
1906 struct lan78xx_net *dev = netdev_priv(net);
1907 struct phy_device *phydev = net->phydev;
1908 int ret;
1909 u32 buf;
1910
1911 ret = usb_autopm_get_interface(dev->intf);
1912 if (ret < 0)
1913 return ret;
1914
1915 ret = phy_ethtool_get_eee(phydev, edata);
1916 if (ret < 0)
1917 goto exit;
1918
1919 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1920 if (buf & MAC_CR_EEE_EN_) {
1921 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1922 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1923 edata->tx_lpi_timer = buf;
1924 } else {
1925 edata->tx_lpi_timer = 0;
1926 }
1927
1928 ret = 0;
1929 exit:
1930 usb_autopm_put_interface(dev->intf);
1931
1932 return ret;
1933 }
1934
lan78xx_set_eee(struct net_device * net,struct ethtool_keee * edata)1935 static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
1936 {
1937 struct lan78xx_net *dev = netdev_priv(net);
1938 int ret;
1939 u32 buf;
1940
1941 ret = usb_autopm_get_interface(dev->intf);
1942 if (ret < 0)
1943 return ret;
1944
1945 ret = phy_ethtool_set_eee(net->phydev, edata);
1946 if (ret < 0)
1947 goto out;
1948
1949 buf = (u32)edata->tx_lpi_timer;
1950 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1951 out:
1952 usb_autopm_put_interface(dev->intf);
1953
1954 return ret;
1955 }
1956
lan78xx_get_link(struct net_device * net)1957 static u32 lan78xx_get_link(struct net_device *net)
1958 {
1959 u32 link;
1960
1961 mutex_lock(&net->phydev->lock);
1962 phy_read_status(net->phydev);
1963 link = net->phydev->link;
1964 mutex_unlock(&net->phydev->lock);
1965
1966 return link;
1967 }
1968
1969 static void lan78xx_get_drvinfo(struct net_device *net,
1970 struct ethtool_drvinfo *info)
1971 {
1972 struct lan78xx_net *dev = netdev_priv(net);
1973
1974 strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1975 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1976 }
1977
1978 static u32 lan78xx_get_msglevel(struct net_device *net)
1979 {
1980 struct lan78xx_net *dev = netdev_priv(net);
1981
1982 return dev->msg_enable;
1983 }
1984
1985 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1986 {
1987 struct lan78xx_net *dev = netdev_priv(net);
1988
1989 dev->msg_enable = level;
1990 }
1991
1992 static int lan78xx_get_link_ksettings(struct net_device *net,
1993 struct ethtool_link_ksettings *cmd)
1994 {
1995 struct lan78xx_net *dev = netdev_priv(net);
1996 struct phy_device *phydev = net->phydev;
1997 int ret;
1998
1999 ret = usb_autopm_get_interface(dev->intf);
2000 if (ret < 0)
2001 return ret;
2002
2003 phy_ethtool_ksettings_get(phydev, cmd);
2004
2005 usb_autopm_put_interface(dev->intf);
2006
2007 return ret;
2008 }
2009
2010 static int lan78xx_set_link_ksettings(struct net_device *net,
2011 const struct ethtool_link_ksettings *cmd)
2012 {
2013 struct lan78xx_net *dev = netdev_priv(net);
2014 struct phy_device *phydev = net->phydev;
2015 int ret = 0;
2016 int temp;
2017
2018 ret = usb_autopm_get_interface(dev->intf);
2019 if (ret < 0)
2020 return ret;
2021
2022 /* change speed & duplex */
2023 ret = phy_ethtool_ksettings_set(phydev, cmd);
2024
2025 if (!cmd->base.autoneg) {
2026 /* force link down */
2027 temp = phy_read(phydev, MII_BMCR);
2028 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
2029 mdelay(1);
2030 phy_write(phydev, MII_BMCR, temp);
2031 }
2032
2033 usb_autopm_put_interface(dev->intf);
2034
2035 return ret;
2036 }
2037
2038 static void lan78xx_get_pause(struct net_device *net,
2039 struct ethtool_pauseparam *pause)
2040 {
2041 struct lan78xx_net *dev = netdev_priv(net);
2042 struct phy_device *phydev = net->phydev;
2043 struct ethtool_link_ksettings ecmd;
2044
2045 phy_ethtool_ksettings_get(phydev, &ecmd);
2046
2047 pause->autoneg = dev->fc_autoneg;
2048
2049 if (dev->fc_request_control & FLOW_CTRL_TX)
2050 pause->tx_pause = 1;
2051
2052 if (dev->fc_request_control & FLOW_CTRL_RX)
2053 pause->rx_pause = 1;
2054 }
2055
2056 static int lan78xx_set_pause(struct net_device *net,
2057 struct ethtool_pauseparam *pause)
2058 {
2059 struct lan78xx_net *dev = netdev_priv(net);
2060 struct phy_device *phydev = net->phydev;
2061 struct ethtool_link_ksettings ecmd;
2062 int ret;
2063
2064 phy_ethtool_ksettings_get(phydev, &ecmd);
2065
2066 if (pause->autoneg && !ecmd.base.autoneg) {
2067 ret = -EINVAL;
2068 goto exit;
2069 }
2070
2071 dev->fc_request_control = 0;
2072 if (pause->rx_pause)
2073 dev->fc_request_control |= FLOW_CTRL_RX;
2074
2075 if (pause->tx_pause)
2076 dev->fc_request_control |= FLOW_CTRL_TX;
2077
2078 if (ecmd.base.autoneg) {
2079 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2080 u32 mii_adv;
2081
2082 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2083 ecmd.link_modes.advertising);
2084 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2085 ecmd.link_modes.advertising);
2086 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2087 mii_adv_to_linkmode_adv_t(fc, mii_adv);
2088 linkmode_or(ecmd.link_modes.advertising, fc,
2089 ecmd.link_modes.advertising);
2090
2091 phy_ethtool_ksettings_set(phydev, &ecmd);
2092 }
2093
2094 dev->fc_autoneg = pause->autoneg;
2095
2096 ret = 0;
2097 exit:
2098 return ret;
2099 }
2100
2101 static int lan78xx_get_regs_len(struct net_device *netdev)
2102 {
2103 return sizeof(lan78xx_regs);
2104 }
2105
2106 static void
2107 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
2108 void *buf)
2109 {
2110 struct lan78xx_net *dev = netdev_priv(netdev);
2111 unsigned int data_count = 0;
2112 u32 *data = buf;
2113 int i, ret;
2114
2115 /* Read Device/MAC registers */
2116 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
2117 ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
2118 if (ret < 0) {
2119 netdev_warn(dev->net,
2120 "failed to read register 0x%08x\n",
2121 lan78xx_regs[i]);
2122 goto clean_data;
2123 }
2124
2125 data_count++;
2126 }
2127
2128 return;
2129
2130 clean_data:
2131 memset(data, 0, data_count * sizeof(u32));
2132 }
2133
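/* ethtool entry points for the device. As a usage sketch (interface
 * name assumed to be eth0): "ethtool -s eth0 wol g" ends up in
 * lan78xx_set_wol(), and "ethtool -a eth0" in lan78xx_get_pause().
 */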
2134 static const struct ethtool_ops lan78xx_ethtool_ops = {
2135 .get_link = lan78xx_get_link,
2136 .nway_reset = phy_ethtool_nway_reset,
2137 .get_drvinfo = lan78xx_get_drvinfo,
2138 .get_msglevel = lan78xx_get_msglevel,
2139 .set_msglevel = lan78xx_set_msglevel,
2140 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
2141 .get_eeprom = lan78xx_ethtool_get_eeprom,
2142 .set_eeprom = lan78xx_ethtool_set_eeprom,
2143 .get_ethtool_stats = lan78xx_get_stats,
2144 .get_sset_count = lan78xx_get_sset_count,
2145 .get_strings = lan78xx_get_strings,
2146 .get_wol = lan78xx_get_wol,
2147 .set_wol = lan78xx_set_wol,
2148 .get_ts_info = ethtool_op_get_ts_info,
2149 .get_eee = lan78xx_get_eee,
2150 .set_eee = lan78xx_set_eee,
2151 .get_pauseparam = lan78xx_get_pause,
2152 .set_pauseparam = lan78xx_set_pause,
2153 .get_link_ksettings = lan78xx_get_link_ksettings,
2154 .set_link_ksettings = lan78xx_set_link_ksettings,
2155 .get_regs_len = lan78xx_get_regs_len,
2156 .get_regs = lan78xx_get_regs,
2157 };
2158
2159 static int lan78xx_init_mac_address(struct lan78xx_net *dev)
2160 {
2161 u32 addr_lo, addr_hi;
2162 u8 addr[6];
2163 int ret;
2164
2165 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
2166 if (ret < 0)
2167 return ret;
2168
2169 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
2170 if (ret < 0)
2171 return ret;
2172
2173 addr[0] = addr_lo & 0xFF;
2174 addr[1] = (addr_lo >> 8) & 0xFF;
2175 addr[2] = (addr_lo >> 16) & 0xFF;
2176 addr[3] = (addr_lo >> 24) & 0xFF;
2177 addr[4] = addr_hi & 0xFF;
2178 addr[5] = (addr_hi >> 8) & 0xFF;
2179
2180 if (!is_valid_ether_addr(addr)) {
2181 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
2182 /* valid address present in Device Tree */
2183 netif_dbg(dev, ifup, dev->net,
2184 "MAC address read from Device Tree");
2185 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
2186 ETH_ALEN, addr) == 0) ||
2187 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
2188 ETH_ALEN, addr) == 0)) &&
2189 is_valid_ether_addr(addr)) {
2190 /* eeprom values are valid so use them */
2191 netif_dbg(dev, ifup, dev->net,
2192 "MAC address read from EEPROM");
2193 } else {
2194 /* generate random MAC */
2195 eth_random_addr(addr);
2196 netif_dbg(dev, ifup, dev->net,
2197 "MAC address set to random addr");
2198 }
2199
2200 addr_lo = addr[0] | (addr[1] << 8) |
2201 (addr[2] << 16) | (addr[3] << 24);
2202 addr_hi = addr[4] | (addr[5] << 8);
2203
2204 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2205 if (ret < 0)
2206 return ret;
2207
2208 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2209 if (ret < 0)
2210 return ret;
2211 }
2212
2213 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2214 if (ret < 0)
2215 return ret;
2216
2217 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2218 if (ret < 0)
2219 return ret;
2220
2221 eth_hw_addr_set(dev->net, addr);
2222
2223 return 0;
2224 }
2225
2226 /* MDIO read and write wrappers for phylib */
2227 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
2228 {
2229 struct lan78xx_net *dev = bus->priv;
2230 u32 val, addr;
2231 int ret;
2232
2233 ret = usb_autopm_get_interface(dev->intf);
2234 if (ret < 0)
2235 return ret;
2236
2237 mutex_lock(&dev->mdiobus_mutex);
2238
2239 /* confirm MII not busy */
2240 ret = lan78xx_mdiobus_wait_not_busy(dev);
2241 if (ret < 0)
2242 goto done;
2243
2244 /* set the address, index & direction (read from PHY) */
2245 addr = mii_access(phy_id, idx, MII_READ);
2246 ret = lan78xx_write_reg(dev, MII_ACC, addr);
2247 if (ret < 0)
2248 goto done;
2249
2250 ret = lan78xx_mdiobus_wait_not_busy(dev);
2251 if (ret < 0)
2252 goto done;
2253
2254 ret = lan78xx_read_reg(dev, MII_DATA, &val);
2255 if (ret < 0)
2256 goto done;
2257
2258 ret = (int)(val & 0xFFFF);
2259
2260 done:
2261 mutex_unlock(&dev->mdiobus_mutex);
2262 usb_autopm_put_interface(dev->intf);
2263
2264 return ret;
2265 }
2266
2267 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2268 u16 regval)
2269 {
2270 struct lan78xx_net *dev = bus->priv;
2271 u32 val, addr;
2272 int ret;
2273
2274 ret = usb_autopm_get_interface(dev->intf);
2275 if (ret < 0)
2276 return ret;
2277
2278 mutex_lock(&dev->mdiobus_mutex);
2279
2280 /* confirm MII not busy */
2281 ret = lan78xx_mdiobus_wait_not_busy(dev);
2282 if (ret < 0)
2283 goto done;
2284
2285 val = (u32)regval;
2286 ret = lan78xx_write_reg(dev, MII_DATA, val);
2287 if (ret < 0)
2288 goto done;
2289
2290 /* set the address, index & direction (write to PHY) */
2291 addr = mii_access(phy_id, idx, MII_WRITE);
2292 ret = lan78xx_write_reg(dev, MII_ACC, addr);
2293 if (ret < 0)
2294 goto done;
2295
2296 ret = lan78xx_mdiobus_wait_not_busy(dev);
2297 if (ret < 0)
2298 goto done;
2299
2300 done:
2301 mutex_unlock(&dev->mdiobus_mutex);
2302 usb_autopm_put_interface(dev->intf);
2303 return ret;
2304 }
2305
2306 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2307 {
2308 struct device_node *node;
2309 int ret;
2310
2311 dev->mdiobus = mdiobus_alloc();
2312 if (!dev->mdiobus) {
2313 netdev_err(dev->net, "can't allocate MDIO bus\n");
2314 return -ENOMEM;
2315 }
2316
2317 dev->mdiobus->priv = (void *)dev;
2318 dev->mdiobus->read = lan78xx_mdiobus_read;
2319 dev->mdiobus->write = lan78xx_mdiobus_write;
2320 dev->mdiobus->name = "lan78xx-mdiobus";
2321 dev->mdiobus->parent = &dev->udev->dev;
2322
2323 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2324 dev->udev->bus->busnum, dev->udev->devnum);
2325
2326 switch (dev->chipid) {
2327 case ID_REV_CHIP_ID_7800_:
2328 case ID_REV_CHIP_ID_7850_:
2329 /* set to internal PHY id */
2330 dev->mdiobus->phy_mask = ~(1 << 1);
2331 break;
2332 case ID_REV_CHIP_ID_7801_:
2333 /* scan through PHYAD[2..0] */
2334 dev->mdiobus->phy_mask = ~(0xFF);
2335 break;
2336 }
2337
2338 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2339 ret = of_mdiobus_register(dev->mdiobus, node);
2340 of_node_put(node);
2341 if (ret) {
2342 netdev_err(dev->net, "can't register MDIO bus\n");
2343 goto exit1;
2344 }
2345
2346 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2347 return 0;
2348 exit1:
2349 mdiobus_free(dev->mdiobus);
2350 return ret;
2351 }
2352
2353 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2354 {
2355 mdiobus_unregister(dev->mdiobus);
2356 mdiobus_free(dev->mdiobus);
2357 }
2358
2359 static void lan78xx_link_status_change(struct net_device *net)
2360 {
2361 struct lan78xx_net *dev = netdev_priv(net);
2362 struct phy_device *phydev = net->phydev;
2363 u32 data;
2364 int ret;
2365
2366 ret = lan78xx_read_reg(dev, MAC_CR, &data);
2367 if (ret < 0)
2368 return;
2369
2370 if (phydev->enable_tx_lpi)
2371 data |= MAC_CR_EEE_EN_;
2372 else
2373 data &= ~MAC_CR_EEE_EN_;
2374 lan78xx_write_reg(dev, MAC_CR, data);
2375
2376 phy_print_status(phydev);
2377 }
2378
2379 static int irq_map(struct irq_domain *d, unsigned int irq,
2380 irq_hw_number_t hwirq)
2381 {
2382 struct irq_domain_data *data = d->host_data;
2383
2384 irq_set_chip_data(irq, data);
2385 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2386 irq_set_noprobe(irq);
2387
2388 return 0;
2389 }
2390
2391 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2392 {
2393 irq_set_chip_and_handler(irq, NULL, NULL);
2394 irq_set_chip_data(irq, NULL);
2395 }
2396
2397 static const struct irq_domain_ops chip_domain_ops = {
2398 .map = irq_map,
2399 .unmap = irq_unmap,
2400 };
2401
2402 static void lan78xx_irq_mask(struct irq_data *irqd)
2403 {
2404 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2405
2406 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2407 }
2408
2409 static void lan78xx_irq_unmask(struct irq_data *irqd)
2410 {
2411 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2412
2413 data->irqenable |= BIT(irqd_to_hwirq(irqd));
2414 }
2415
2416 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2417 {
2418 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2419
2420 mutex_lock(&data->irq_lock);
2421 }
2422
2423 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2424 {
2425 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2426 struct lan78xx_net *dev =
2427 container_of(data, struct lan78xx_net, domain_data);
2428 u32 buf;
2429 int ret;
2430
2431 /* Access registers here because irq_bus_lock and irq_bus_sync_unlock
2432 * are the only two callbacks executed in a non-atomic context.
2433 */
2434 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2435 if (ret < 0)
2436 goto irq_bus_sync_unlock;
2437
2438 if (buf != data->irqenable)
2439 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2440
2441 irq_bus_sync_unlock:
2442 if (ret < 0)
2443 netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
2444 ERR_PTR(ret));
2445
2446 mutex_unlock(&data->irq_lock);
2447 }
2448
2449 static struct irq_chip lan78xx_irqchip = {
2450 .name = "lan78xx-irqs",
2451 .irq_mask = lan78xx_irq_mask,
2452 .irq_unmask = lan78xx_irq_unmask,
2453 .irq_bus_lock = lan78xx_irq_bus_lock,
2454 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
2455 };
2456
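/* Create a simple IRQ domain for the device's interrupt endpoint and
 * map the PHY interrupt (INT_EP_PHY) so that phylib can use it
 * instead of polling.
 */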
2457 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2458 {
2459 struct device_node *of_node;
2460 struct irq_domain *irqdomain;
2461 unsigned int irqmap = 0;
2462 u32 buf;
2463 int ret = 0;
2464
2465 of_node = dev->udev->dev.parent->of_node;
2466
2467 mutex_init(&dev->domain_data.irq_lock);
2468
2469 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2470 if (ret < 0)
2471 return ret;
2472
2473 dev->domain_data.irqenable = buf;
2474
2475 dev->domain_data.irqchip = &lan78xx_irqchip;
2476 dev->domain_data.irq_handler = handle_simple_irq;
2477
2478 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2479 &chip_domain_ops, &dev->domain_data);
2480 if (irqdomain) {
2481 /* create mapping for PHY interrupt */
2482 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2483 if (!irqmap) {
2484 irq_domain_remove(irqdomain);
2485
2486 irqdomain = NULL;
2487 ret = -EINVAL;
2488 }
2489 } else {
2490 ret = -EINVAL;
2491 }
2492
2493 dev->domain_data.irqdomain = irqdomain;
2494 dev->domain_data.phyirq = irqmap;
2495
2496 return ret;
2497 }
2498
2499 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2500 {
2501 if (dev->domain_data.phyirq > 0) {
2502 irq_dispose_mapping(dev->domain_data.phyirq);
2503
2504 if (dev->domain_data.irqdomain)
2505 irq_domain_remove(dev->domain_data.irqdomain);
2506 }
2507 dev->domain_data.phyirq = 0;
2508 dev->domain_data.irqdomain = NULL;
2509 }
2510
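/* LAN7801 has no internal PHY. Use the first PHY found on the MDIO
 * bus; when none is present, register a 1 Gbps full-duplex fixed PHY,
 * presumably for designs with a direct RGMII link (an assumption).
 */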
2511 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2512 {
2513 u32 buf;
2514 int ret;
2515 struct fixed_phy_status fphy_status = {
2516 .link = 1,
2517 .speed = SPEED_1000,
2518 .duplex = DUPLEX_FULL,
2519 };
2520 struct phy_device *phydev;
2521
2522 phydev = phy_find_first(dev->mdiobus);
2523 if (!phydev) {
2524 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2525 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2526 if (IS_ERR(phydev)) {
2527 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2528 return NULL;
2529 }
2530 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2531 dev->interface = PHY_INTERFACE_MODE_RGMII;
2532 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2533 MAC_RGMII_ID_TXC_DELAY_EN_);
2534 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2535 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2536 buf |= HW_CFG_CLK125_EN_;
2537 buf |= HW_CFG_REFCLK25_EN_;
2538 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2539 } else {
2540 if (!phydev->drv) {
2541 netdev_err(dev->net, "no PHY driver found\n");
2542 return NULL;
2543 }
2544 dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2545 /* The PHY driver is responsible for configuring the proper RGMII
2546 * interface delays, so disable the RGMII delays on the MAC side.
2547 */
2548 lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2549
2550 phydev->is_internal = false;
2551 }
2552 return phydev;
2553 }
2554
2555 static int lan78xx_phy_init(struct lan78xx_net *dev)
2556 {
2557 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2558 int ret;
2559 u32 mii_adv;
2560 struct phy_device *phydev;
2561
2562 switch (dev->chipid) {
2563 case ID_REV_CHIP_ID_7801_:
2564 phydev = lan7801_phy_init(dev);
2565 if (!phydev) {
2566 netdev_err(dev->net, "lan7801: PHY Init Failed");
2567 return -EIO;
2568 }
2569 break;
2570
2571 case ID_REV_CHIP_ID_7800_:
2572 case ID_REV_CHIP_ID_7850_:
2573 phydev = phy_find_first(dev->mdiobus);
2574 if (!phydev) {
2575 netdev_err(dev->net, "no PHY found\n");
2576 return -EIO;
2577 }
2578 phydev->is_internal = true;
2579 dev->interface = PHY_INTERFACE_MODE_GMII;
2580 break;
2581
2582 default:
2583 netdev_err(dev->net, "Unknown CHIP ID found\n");
2584 return -EIO;
2585 }
2586
2587 /* if phyirq is not set, use polling mode in phylib */
2588 if (dev->domain_data.phyirq > 0)
2589 phydev->irq = dev->domain_data.phyirq;
2590 else
2591 phydev->irq = PHY_POLL;
2592 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2593
2594 /* set to AUTOMDIX */
2595 phydev->mdix = ETH_TP_MDI_AUTO;
2596
2597 ret = phy_connect_direct(dev->net, phydev,
2598 lan78xx_link_status_change,
2599 dev->interface);
2600 if (ret) {
2601 netdev_err(dev->net, "can't attach PHY to %s\n",
2602 dev->mdiobus->id);
2603 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2604 if (phy_is_pseudo_fixed_link(phydev)) {
2605 fixed_phy_unregister(phydev);
2606 phy_device_free(phydev);
2607 }
2608 }
2609 return -EIO;
2610 }
2611
2612 /* MAC doesn't support 1000T Half */
2613 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2614
2615 /* support both flow controls */
2616 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2617 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2618 phydev->advertising);
2619 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2620 phydev->advertising);
2621 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2622 mii_adv_to_linkmode_adv_t(fc, mii_adv);
2623 linkmode_or(phydev->advertising, fc, phydev->advertising);
2624
2625 phy_support_eee(phydev);
2626
2627 if (phydev->mdio.dev.of_node) {
2628 u32 reg;
2629 int len;
2630
2631 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2632 "microchip,led-modes",
2633 sizeof(u32));
2634 if (len >= 0) {
2635 /* Ensure the appropriate LEDs are enabled */
2636 lan78xx_read_reg(dev, HW_CFG, &reg);
2637 reg &= ~(HW_CFG_LED0_EN_ |
2638 HW_CFG_LED1_EN_ |
2639 HW_CFG_LED2_EN_ |
2640 HW_CFG_LED3_EN_);
2641 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2642 (len > 1) * HW_CFG_LED1_EN_ |
2643 (len > 2) * HW_CFG_LED2_EN_ |
2644 (len > 3) * HW_CFG_LED3_EN_;
2645 lan78xx_write_reg(dev, HW_CFG, reg);
2646 }
2647 }
2648
2649 genphy_config_aneg(phydev);
2650
2651 dev->fc_autoneg = phydev->autoneg;
2652
2653 return 0;
2654 }
2655
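/* Update the maximum RX frame size in MAC_RX. The receiver is
 * temporarily disabled while the size field is changed, and 4 bytes
 * are added to the requested size to account for the FCS.
 */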
2656 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2657 {
2658 bool rxenabled;
2659 u32 buf;
2660 int ret;
2661
2662 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2663 if (ret < 0)
2664 return ret;
2665
2666 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2667
2668 if (rxenabled) {
2669 buf &= ~MAC_RX_RXEN_;
2670 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2671 if (ret < 0)
2672 return ret;
2673 }
2674
2675 /* add 4 to size for FCS */
2676 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2677 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2678
2679 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2680 if (ret < 0)
2681 return ret;
2682
2683 if (rxenabled) {
2684 buf |= MAC_RX_RXEN_;
2685 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2686 if (ret < 0)
2687 return ret;
2688 }
2689
2690 return 0;
2691 }
2692
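/* Unlink all URBs on @q that have not already entered the
 * unlink_start state. Returns the number of URBs whose unlink was
 * successfully started.
 */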
2693 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2694 {
2695 struct sk_buff *skb;
2696 unsigned long flags;
2697 int count = 0;
2698
2699 spin_lock_irqsave(&q->lock, flags);
2700 while (!skb_queue_empty(q)) {
2701 struct skb_data *entry;
2702 struct urb *urb;
2703 int ret;
2704
2705 skb_queue_walk(q, skb) {
2706 entry = (struct skb_data *)skb->cb;
2707 if (entry->state != unlink_start)
2708 goto found;
2709 }
2710 break;
2711 found:
2712 entry->state = unlink_start;
2713 urb = entry->urb;
2714
2715 /* Take a reference on the URB so it cannot be freed
2716 * while usb_unlink_urb is in progress; otherwise a
2717 * use-after-free could be triggered inside usb_unlink_urb,
2718 * which always races with the .complete handler
2719 * (including defer_bh).
2720 */
2721 usb_get_urb(urb);
2722 spin_unlock_irqrestore(&q->lock, flags);
2723 /* during some PM-driven resume scenarios,
2724 * these (async) unlinks complete immediately
2725 */
2726 ret = usb_unlink_urb(urb);
2727 if (ret != -EINPROGRESS && ret != 0)
2728 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2729 else
2730 count++;
2731 usb_put_urb(urb);
2732 spin_lock_irqsave(&q->lock, flags);
2733 }
2734 spin_unlock_irqrestore(&q->lock, flags);
2735 return count;
2736 }
2737
2738 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2739 {
2740 struct lan78xx_net *dev = netdev_priv(netdev);
2741 int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2742 int ret;
2743
2744 /* no second zero-length packet read wanted after mtu-sized packets */
2745 if ((max_frame_len % dev->maxpacket) == 0)
2746 return -EDOM;
2747
2748 ret = usb_autopm_get_interface(dev->intf);
2749 if (ret < 0)
2750 return ret;
2751
2752 ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2753 if (ret < 0)
2754 netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2755 new_mtu, netdev->mtu, ERR_PTR(ret));
2756 else
2757 WRITE_ONCE(netdev->mtu, new_mtu);
2758
2759 usb_autopm_put_interface(dev->intf);
2760
2761 return ret;
2762 }
2763
2764 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2765 {
2766 struct lan78xx_net *dev = netdev_priv(netdev);
2767 struct sockaddr *addr = p;
2768 u32 addr_lo, addr_hi;
2769 int ret;
2770
2771 if (netif_running(netdev))
2772 return -EBUSY;
2773
2774 if (!is_valid_ether_addr(addr->sa_data))
2775 return -EADDRNOTAVAIL;
2776
2777 eth_hw_addr_set(netdev, addr->sa_data);
2778
2779 addr_lo = netdev->dev_addr[0] |
2780 netdev->dev_addr[1] << 8 |
2781 netdev->dev_addr[2] << 16 |
2782 netdev->dev_addr[3] << 24;
2783 addr_hi = netdev->dev_addr[4] |
2784 netdev->dev_addr[5] << 8;
2785
2786 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2787 if (ret < 0)
2788 return ret;
2789
2790 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2791 if (ret < 0)
2792 return ret;
2793
2794 /* Added to support MAC address changes */
2795 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2796 if (ret < 0)
2797 return ret;
2798
2799 return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2800 }
2801
2802 /* Enable or disable Rx checksum offload engine */
2803 static int lan78xx_set_features(struct net_device *netdev,
2804 netdev_features_t features)
2805 {
2806 struct lan78xx_net *dev = netdev_priv(netdev);
2807 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2808 unsigned long flags;
2809
2810 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2811
2812 if (features & NETIF_F_RXCSUM) {
2813 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2814 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2815 } else {
2816 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2817 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2818 }
2819
2820 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2821 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2822 else
2823 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2824
2825 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2826 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2827 else
2828 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2829
2830 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2831
2832 return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2833 }
2834
2835 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2836 {
2837 struct lan78xx_priv *pdata =
2838 container_of(param, struct lan78xx_priv, set_vlan);
2839 struct lan78xx_net *dev = pdata->dev;
2840
2841 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2842 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2843 }
2844
2845 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2846 __be16 proto, u16 vid)
2847 {
2848 struct lan78xx_net *dev = netdev_priv(netdev);
2849 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2850 u16 vid_bit_index;
2851 u16 vid_dword_index;
2852
2853 vid_dword_index = (vid >> 5) & 0x7F;
2854 vid_bit_index = vid & 0x1F;
2855
2856 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2857
2858 /* defer register writes to a sleepable context */
2859 schedule_work(&pdata->set_vlan);
2860
2861 return 0;
2862 }
2863
2864 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2865 __be16 proto, u16 vid)
2866 {
2867 struct lan78xx_net *dev = netdev_priv(netdev);
2868 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2869 u16 vid_bit_index;
2870 u16 vid_dword_index;
2871
2872 vid_dword_index = (vid >> 5) & 0x7F;
2873 vid_bit_index = vid & 0x1F;
2874
2875 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2876
2877 /* defer register writes to a sleepable context */
2878 schedule_work(&pdata->set_vlan);
2879
2880 return 0;
2881 }
2882
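/* Program the USB Latency Tolerance Messaging (LTM) registers. When
 * LTM is enabled, the values are loaded from EEPROM or OTP if a valid
 * 24-byte table is present; otherwise all six registers are zeroed.
 */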
2883 static int lan78xx_init_ltm(struct lan78xx_net *dev)
2884 {
2885 u32 regs[6] = { 0 };
2886 int ret;
2887 u32 buf;
2888
2889 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2890 if (ret < 0)
2891 goto init_ltm_failed;
2892
2893 if (buf & USB_CFG1_LTM_ENABLE_) {
2894 u8 temp[2];
2895 /* Get values from EEPROM first */
2896 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2897 if (temp[0] == 24) {
2898 ret = lan78xx_read_raw_eeprom(dev,
2899 temp[1] * 2,
2900 24,
2901 (u8 *)regs);
2902 if (ret < 0)
2903 return ret;
2904 }
2905 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2906 if (temp[0] == 24) {
2907 ret = lan78xx_read_raw_otp(dev,
2908 temp[1] * 2,
2909 24,
2910 (u8 *)regs);
2911 if (ret < 0)
2912 return ret;
2913 }
2914 }
2915 }
2916
2917 ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2918 if (ret < 0)
2919 goto init_ltm_failed;
2920
2921 ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2922 if (ret < 0)
2923 goto init_ltm_failed;
2924
2925 ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2926 if (ret < 0)
2927 goto init_ltm_failed;
2928
2929 ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2930 if (ret < 0)
2931 goto init_ltm_failed;
2932
2933 ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2934 if (ret < 0)
2935 goto init_ltm_failed;
2936
2937 ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2938 if (ret < 0)
2939 goto init_ltm_failed;
2940
2941 return 0;
2942
2943 init_ltm_failed:
2944 netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
2945 return ret;
2946 }
2947
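/* Select URB counts, URB buffer sizes, bulk-in delay and burst cap
 * according to the negotiated USB bus speed.
 */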
2948 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2949 {
2950 int result = 0;
2951
2952 switch (dev->udev->speed) {
2953 case USB_SPEED_SUPER:
2954 dev->rx_urb_size = RX_SS_URB_SIZE;
2955 dev->tx_urb_size = TX_SS_URB_SIZE;
2956 dev->n_rx_urbs = RX_SS_URB_NUM;
2957 dev->n_tx_urbs = TX_SS_URB_NUM;
2958 dev->bulk_in_delay = SS_BULK_IN_DELAY;
2959 dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2960 break;
2961 case USB_SPEED_HIGH:
2962 dev->rx_urb_size = RX_HS_URB_SIZE;
2963 dev->tx_urb_size = TX_HS_URB_SIZE;
2964 dev->n_rx_urbs = RX_HS_URB_NUM;
2965 dev->n_tx_urbs = TX_HS_URB_NUM;
2966 dev->bulk_in_delay = HS_BULK_IN_DELAY;
2967 dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2968 break;
2969 case USB_SPEED_FULL:
2970 dev->rx_urb_size = RX_FS_URB_SIZE;
2971 dev->tx_urb_size = TX_FS_URB_SIZE;
2972 dev->n_rx_urbs = RX_FS_URB_NUM;
2973 dev->n_tx_urbs = TX_FS_URB_NUM;
2974 dev->bulk_in_delay = FS_BULK_IN_DELAY;
2975 dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2976 break;
2977 default:
2978 netdev_warn(dev->net, "USB bus speed not supported\n");
2979 result = -EIO;
2980 break;
2981 }
2982
2983 return result;
2984 }
2985
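/* Perform a lite reset and bring the chip to a known state: restore
 * the MAC address, set burst/bulk-in parameters, FIFO sizes and flow
 * control defaults, reset the PHY and program the maximum RX frame
 * length for the current MTU.
 */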
2986 static int lan78xx_reset(struct lan78xx_net *dev)
2987 {
2988 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2989 unsigned long timeout;
2990 int ret;
2991 u32 buf;
2992 u8 sig;
2993
2994 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2995 if (ret < 0)
2996 return ret;
2997
2998 buf |= HW_CFG_LRST_;
2999
3000 ret = lan78xx_write_reg(dev, HW_CFG, buf);
3001 if (ret < 0)
3002 return ret;
3003
3004 timeout = jiffies + HZ;
3005 do {
3006 mdelay(1);
3007 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3008 if (ret < 0)
3009 return ret;
3010
3011 if (time_after(jiffies, timeout)) {
3012 netdev_warn(dev->net,
3013 "timeout on completion of LiteReset");
3014 ret = -ETIMEDOUT;
3015 return ret;
3016 }
3017 } while (buf & HW_CFG_LRST_);
3018
3019 ret = lan78xx_init_mac_address(dev);
3020 if (ret < 0)
3021 return ret;
3022
3023 /* save DEVID for later usage */
3024 ret = lan78xx_read_reg(dev, ID_REV, &buf);
3025 if (ret < 0)
3026 return ret;
3027
3028 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
3029 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
3030
3031 /* Respond to the IN token with a NAK */
3032 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3033 if (ret < 0)
3034 return ret;
3035
3036 buf |= USB_CFG_BIR_;
3037
3038 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3039 if (ret < 0)
3040 return ret;
3041
3042 /* Init LTM */
3043 ret = lan78xx_init_ltm(dev);
3044 if (ret < 0)
3045 return ret;
3046
3047 ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
3048 if (ret < 0)
3049 return ret;
3050
3051 ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
3052 if (ret < 0)
3053 return ret;
3054
3055 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3056 if (ret < 0)
3057 return ret;
3058
3059 buf |= HW_CFG_MEF_;
3060 buf |= HW_CFG_CLK125_EN_;
3061 buf |= HW_CFG_REFCLK25_EN_;
3062
3063 ret = lan78xx_write_reg(dev, HW_CFG, buf);
3064 if (ret < 0)
3065 return ret;
3066
3067 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3068 if (ret < 0)
3069 return ret;
3070
3071 buf |= USB_CFG_BCE_;
3072
3073 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3074 if (ret < 0)
3075 return ret;
3076
3077 /* set FIFO sizes */
3078 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
3079
3080 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
3081 if (ret < 0)
3082 return ret;
3083
3084 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
3085
3086 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
3087 if (ret < 0)
3088 return ret;
3089
3090 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3091 if (ret < 0)
3092 return ret;
3093
3094 ret = lan78xx_write_reg(dev, FLOW, 0);
3095 if (ret < 0)
3096 return ret;
3097
3098 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3099 if (ret < 0)
3100 return ret;
3101
3102 /* Don't need rfe_ctl_lock during initialisation */
3103 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3104 if (ret < 0)
3105 return ret;
3106
3107 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3108
3109 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3110 if (ret < 0)
3111 return ret;
3112
3113 /* Enable or disable checksum offload engines */
3114 ret = lan78xx_set_features(dev->net, dev->net->features);
3115 if (ret < 0)
3116 return ret;
3117
3118 lan78xx_set_multicast(dev->net);
3119
3120 /* reset PHY */
3121 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3122 if (ret < 0)
3123 return ret;
3124
3125 buf |= PMT_CTL_PHY_RST_;
3126
3127 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3128 if (ret < 0)
3129 return ret;
3130
3131 timeout = jiffies + HZ;
3132 do {
3133 mdelay(1);
3134 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3135 if (ret < 0)
3136 return ret;
3137
3138 if (time_after(jiffies, timeout)) {
3139 netdev_warn(dev->net, "timeout waiting for PHY Reset");
3140 ret = -ETIMEDOUT;
3141 return ret;
3142 }
3143 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3144
3145 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3146 if (ret < 0)
3147 return ret;
3148
3149 /* LAN7801 only has RGMII mode */
3150 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
3151 buf &= ~MAC_CR_GMII_EN_;
3152 /* Enable Auto Duplex and Auto speed */
3153 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3154 }
3155
3156 if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
3157 dev->chipid == ID_REV_CHIP_ID_7850_) {
3158 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
3159 if (!ret && sig != EEPROM_INDICATOR) {
3160 /* Implies there is no external EEPROM; set the MAC speed */
3161 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
3162 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3163 }
3164 }
3165 ret = lan78xx_write_reg(dev, MAC_CR, buf);
3166 if (ret < 0)
3167 return ret;
3168
3169 ret = lan78xx_set_rx_max_frame_length(dev,
3170 RX_MAX_FRAME_LEN(dev->net->mtu));
3171
3172 return ret;
3173 }
3174
3175 static void lan78xx_init_stats(struct lan78xx_net *dev)
3176 {
3177 u32 *p;
3178 int i;
3179
3180 /* initialize the rollover limits for the stats update:
3181 * some counters are 20 bits wide and some are 32 bits
3182 */
3183 p = (u32 *)&dev->stats.rollover_max;
3184 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3185 p[i] = 0xFFFFF;
3186
3187 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3188 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3189 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3190 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3191 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3192 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3193 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3194 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3195 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3196 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3197
3198 set_bit(EVENT_STAT_UPDATE, &dev->flags);
3199 }
3200
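/* ndo_open handler: start the PHY, submit the interrupt URB used for
 * link events, flush and start the Tx/Rx data paths, then enable NAPI
 * and schedule a link reset event.
 */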
3201 static int lan78xx_open(struct net_device *net)
3202 {
3203 struct lan78xx_net *dev = netdev_priv(net);
3204 int ret;
3205
3206 netif_dbg(dev, ifup, dev->net, "open device");
3207
3208 ret = usb_autopm_get_interface(dev->intf);
3209 if (ret < 0)
3210 return ret;
3211
3212 mutex_lock(&dev->dev_mutex);
3213
3214 phy_start(net->phydev);
3215
3216 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
3217
3218 /* for Link Check */
3219 if (dev->urb_intr) {
3220 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3221 if (ret < 0) {
3222 netif_err(dev, ifup, dev->net,
3223 "intr submit %d\n", ret);
3224 goto done;
3225 }
3226 }
3227
3228 ret = lan78xx_flush_rx_fifo(dev);
3229 if (ret < 0)
3230 goto done;
3231 ret = lan78xx_flush_tx_fifo(dev);
3232 if (ret < 0)
3233 goto done;
3234
3235 ret = lan78xx_start_tx_path(dev);
3236 if (ret < 0)
3237 goto done;
3238 ret = lan78xx_start_rx_path(dev);
3239 if (ret < 0)
3240 goto done;
3241
3242 lan78xx_init_stats(dev);
3243
3244 set_bit(EVENT_DEV_OPEN, &dev->flags);
3245
3246 netif_start_queue(net);
3247
3248 dev->link_on = false;
3249
3250 napi_enable(&dev->napi);
3251
3252 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
3253 done:
3254 mutex_unlock(&dev->dev_mutex);
3255
3256 if (ret < 0)
3257 usb_autopm_put_interface(dev->intf);
3258
3259 return ret;
3260 }
3261
3262 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3263 {
3264 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3265 DECLARE_WAITQUEUE(wait, current);
3266 int temp;
3267
3268 /* ensure there are no more active urbs */
3269 add_wait_queue(&unlink_wakeup, &wait);
3270 set_current_state(TASK_UNINTERRUPTIBLE);
3271 dev->wait = &unlink_wakeup;
3272 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3273
3274 /* maybe wait for deletions to finish. */
3275 while (!skb_queue_empty(&dev->rxq) ||
3276 !skb_queue_empty(&dev->txq)) {
3277 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3278 set_current_state(TASK_UNINTERRUPTIBLE);
3279 netif_dbg(dev, ifdown, dev->net,
3280 "waited for %d urb completions", temp);
3281 }
3282 set_current_state(TASK_RUNNING);
3283 dev->wait = NULL;
3284 remove_wait_queue(&unlink_wakeup, &wait);
3285
3286 /* empty Rx done, Rx overflow and Tx pend queues
3287 */
3288 while (!skb_queue_empty(&dev->rxq_done)) {
3289 struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3290
3291 lan78xx_release_rx_buf(dev, skb);
3292 }
3293
3294 skb_queue_purge(&dev->rxq_overflow);
3295 skb_queue_purge(&dev->txq_pend);
3296 }
3297
3298 static int lan78xx_stop(struct net_device *net)
3299 {
3300 struct lan78xx_net *dev = netdev_priv(net);
3301
3302 netif_dbg(dev, ifup, dev->net, "stop device");
3303
3304 mutex_lock(&dev->dev_mutex);
3305
3306 if (timer_pending(&dev->stat_monitor))
3307 del_timer_sync(&dev->stat_monitor);
3308
3309 clear_bit(EVENT_DEV_OPEN, &dev->flags);
3310 netif_stop_queue(net);
3311 napi_disable(&dev->napi);
3312
3313 lan78xx_terminate_urbs(dev);
3314
3315 netif_info(dev, ifdown, dev->net,
3316 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3317 net->stats.rx_packets, net->stats.tx_packets,
3318 net->stats.rx_errors, net->stats.tx_errors);
3319
3320 /* ignore errors that occur while stopping the Tx and Rx data paths */
3321 lan78xx_stop_tx_path(dev);
3322 lan78xx_stop_rx_path(dev);
3323
3324 if (net->phydev)
3325 phy_stop(net->phydev);
3326
3327 usb_kill_urb(dev->urb_intr);
3328
3329 /* deferred work (task, timer, softirq) must also stop.
3330 * can't flush_scheduled_work() until we drop rtnl (later),
3331 * else workers could deadlock; so make workers a NOP.
3332 */
3333 clear_bit(EVENT_TX_HALT, &dev->flags);
3334 clear_bit(EVENT_RX_HALT, &dev->flags);
3335 clear_bit(EVENT_LINK_RESET, &dev->flags);
3336 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3337
3338 cancel_delayed_work_sync(&dev->wq);
3339
3340 usb_autopm_put_interface(dev->intf);
3341
3342 mutex_unlock(&dev->dev_mutex);
3343
3344 return 0;
3345 }
3346
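/* Move a completed SKB from its active queue to rxq_done, scheduling
 * NAPI when the done queue becomes non-empty. Returns the SKB's
 * previous state.
 */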
3347 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3348 struct sk_buff_head *list, enum skb_state state)
3349 {
3350 unsigned long flags;
3351 enum skb_state old_state;
3352 struct skb_data *entry = (struct skb_data *)skb->cb;
3353
3354 spin_lock_irqsave(&list->lock, flags);
3355 old_state = entry->state;
3356 entry->state = state;
3357
3358 __skb_unlink(skb, list);
3359 spin_unlock(&list->lock);
3360 spin_lock(&dev->rxq_done.lock);
3361
3362 __skb_queue_tail(&dev->rxq_done, skb);
3363 if (skb_queue_len(&dev->rxq_done) == 1)
3364 napi_schedule(&dev->napi);
3365
3366 spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3367
3368 return old_state;
3369 }
3370
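/* Completion handler for bulk-out URBs: update Tx statistics,
 * classify errors (halted endpoint, interface gone, link errors),
 * release the URB buffer and kick NAPI if more Tx data is pending.
 */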
3371 static void tx_complete(struct urb *urb)
3372 {
3373 struct sk_buff *skb = (struct sk_buff *)urb->context;
3374 struct skb_data *entry = (struct skb_data *)skb->cb;
3375 struct lan78xx_net *dev = entry->dev;
3376
3377 if (urb->status == 0) {
3378 dev->net->stats.tx_packets += entry->num_of_packet;
3379 dev->net->stats.tx_bytes += entry->length;
3380 } else {
3381 dev->net->stats.tx_errors += entry->num_of_packet;
3382
3383 switch (urb->status) {
3384 case -EPIPE:
3385 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3386 break;
3387
3388 /* software-driven interface shutdown */
3389 case -ECONNRESET:
3390 case -ESHUTDOWN:
3391 netif_dbg(dev, tx_err, dev->net,
3392 "tx err interface gone %d\n",
3393 entry->urb->status);
3394 break;
3395
3396 case -EPROTO:
3397 case -ETIME:
3398 case -EILSEQ:
3399 netif_stop_queue(dev->net);
3400 netif_dbg(dev, tx_err, dev->net,
3401 "tx err queue stopped %d\n",
3402 entry->urb->status);
3403 break;
3404 default:
3405 netif_dbg(dev, tx_err, dev->net,
3406 "unknown tx err %d\n",
3407 entry->urb->status);
3408 break;
3409 }
3410 }
3411
3412 usb_autopm_put_interface_async(dev->intf);
3413
3414 skb_unlink(skb, &dev->txq);
3415
3416 lan78xx_release_tx_buf(dev, skb);
3417
3418 /* Re-schedule NAPI if Tx data is pending but no URBs are in progress.
3419 */
3420 if (skb_queue_empty(&dev->txq) &&
3421 !skb_queue_empty(&dev->txq_pend))
3422 napi_schedule(&dev->napi);
3423 }
3424
3425 static void lan78xx_queue_skb(struct sk_buff_head *list,
3426 struct sk_buff *newsk, enum skb_state state)
3427 {
3428 struct skb_data *entry = (struct skb_data *)newsk->cb;
3429
3430 __skb_queue_tail(list, newsk);
3431 entry->state = state;
3432 }
3433
3434 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3435 {
3436 return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3437 }
3438
3439 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3440 {
3441 return dev->tx_pend_data_len;
3442 }
3443
3444 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3445 struct sk_buff *skb,
3446 unsigned int *tx_pend_data_len)
3447 {
3448 unsigned long flags;
3449
3450 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3451
3452 __skb_queue_tail(&dev->txq_pend, skb);
3453
3454 dev->tx_pend_data_len += skb->len;
3455 *tx_pend_data_len = dev->tx_pend_data_len;
3456
3457 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3458 }
3459
3460 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3461 struct sk_buff *skb,
3462 unsigned int *tx_pend_data_len)
3463 {
3464 unsigned long flags;
3465
3466 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3467
3468 __skb_queue_head(&dev->txq_pend, skb);
3469
3470 dev->tx_pend_data_len += skb->len;
3471 *tx_pend_data_len = dev->tx_pend_data_len;
3472
3473 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3474 }
3475
3476 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3477 struct sk_buff **skb,
3478 unsigned int *tx_pend_data_len)
3479 {
3480 unsigned long flags;
3481
3482 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3483
3484 *skb = __skb_dequeue(&dev->txq_pend);
3485 if (*skb)
3486 dev->tx_pend_data_len -= (*skb)->len;
3487 *tx_pend_data_len = dev->tx_pend_data_len;
3488
3489 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3490 }
3491
3492 static netdev_tx_t
3493 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3494 {
3495 struct lan78xx_net *dev = netdev_priv(net);
3496 unsigned int tx_pend_data_len;
3497
3498 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3499 schedule_delayed_work(&dev->wq, 0);
3500
3501 skb_tx_timestamp(skb);
3502
3503 lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3504
3505 /* Set up a Tx URB if none is in progress */
3506
3507 if (skb_queue_empty(&dev->txq))
3508 napi_schedule(&dev->napi);
3509
3510 /* Stop stack Tx queue if we have enough data to fill
3511 * all the free Tx URBs.
3512 */
3513 if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3514 netif_stop_queue(net);
3515
3516 netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3517 tx_pend_data_len, lan78xx_tx_urb_space(dev));
3518
3519 /* Kick off transmission of pending data */
3520
3521 if (!skb_queue_empty(&dev->txq_free))
3522 napi_schedule(&dev->napi);
3523 }
3524
3525 return NETDEV_TX_OK;
3526 }
3527
3528 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3529 {
3530 struct lan78xx_priv *pdata = NULL;
3531 int ret;
3532 int i;
3533
3534 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3535
3536 pdata = (struct lan78xx_priv *)(dev->data[0]);
3537 if (!pdata) {
3538 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3539 return -ENOMEM;
3540 }
3541
3542 pdata->dev = dev;
3543
3544 spin_lock_init(&pdata->rfe_ctl_lock);
3545 mutex_init(&pdata->dataport_mutex);
3546
3547 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3548
3549 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3550 pdata->vlan_table[i] = 0;
3551
3552 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3553
3554 dev->net->features = 0;
3555
3556 if (DEFAULT_TX_CSUM_ENABLE)
3557 dev->net->features |= NETIF_F_HW_CSUM;
3558
3559 if (DEFAULT_RX_CSUM_ENABLE)
3560 dev->net->features |= NETIF_F_RXCSUM;
3561
3562 if (DEFAULT_TSO_CSUM_ENABLE)
3563 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3564
3565 if (DEFAULT_VLAN_RX_OFFLOAD)
3566 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3567
3568 if (DEFAULT_VLAN_FILTER_ENABLE)
3569 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3570
3571 dev->net->hw_features = dev->net->features;
3572
3573 ret = lan78xx_setup_irq_domain(dev);
3574 if (ret < 0) {
3575 netdev_warn(dev->net,
3576 "lan78xx_setup_irq_domain() failed : %d", ret);
3577 goto out1;
3578 }
3579
3580 /* Init all registers */
3581 ret = lan78xx_reset(dev);
3582 if (ret) {
3583 netdev_warn(dev->net, "Registers INIT FAILED....");
3584 goto out2;
3585 }
3586
3587 ret = lan78xx_mdio_init(dev);
3588 if (ret) {
3589 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3590 goto out2;
3591 }
3592
3593 dev->net->flags |= IFF_MULTICAST;
3594
3595 pdata->wol = WAKE_MAGIC;
3596
3597 return ret;
3598
3599 out2:
3600 lan78xx_remove_irq_domain(dev);
3601
3602 out1:
3603 netdev_warn(dev->net, "Bind routine FAILED");
3604 cancel_work_sync(&pdata->set_multicast);
3605 cancel_work_sync(&pdata->set_vlan);
3606 kfree(pdata);
3607 return ret;
3608 }
3609
3610 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3611 {
3612 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3613
3614 lan78xx_remove_irq_domain(dev);
3615
3616 lan78xx_remove_mdio(dev);
3617
3618 if (pdata) {
3619 cancel_work_sync(&pdata->set_multicast);
3620 cancel_work_sync(&pdata->set_vlan);
3621 netif_dbg(dev, ifdown, dev->net, "free pdata");
3622 kfree(pdata);
3623 pdata = NULL;
3624 dev->data[0] = 0;
3625 }
3626 }
3627
3628 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3629 struct sk_buff *skb,
3630 u32 rx_cmd_a, u32 rx_cmd_b)
3631 {
3632 /* HW Checksum offload appears to be flawed if used when not stripping
3633 * VLAN headers. Drop back to S/W checksums under these conditions.
3634 */
3635 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3636 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3637 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3638 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3639 skb->ip_summed = CHECKSUM_NONE;
3640 } else {
3641 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3642 skb->ip_summed = CHECKSUM_COMPLETE;
3643 }
3644 }
3645
3646 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3647 struct sk_buff *skb,
3648 u32 rx_cmd_a, u32 rx_cmd_b)
3649 {
3650 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3651 (rx_cmd_a & RX_CMD_A_FVTG_))
3652 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3653 (rx_cmd_b & 0xffff));
3654 }
3655
3656 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3657 {
3658 dev->net->stats.rx_packets++;
3659 dev->net->stats.rx_bytes += skb->len;
3660
3661 skb->protocol = eth_type_trans(skb, dev->net);
3662
3663 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3664 skb->len + sizeof(struct ethhdr), skb->protocol);
3665 memset(skb->cb, 0, sizeof(struct skb_data));
3666
3667 if (skb_defer_rx_timestamp(skb))
3668 return;
3669
3670 napi_gro_receive(&dev->napi, skb);
3671 }
3672
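/* Parse a received URB buffer. Each frame is preceded by three
 * little-endian command words (RX_CMD_A/B/C) followed by the packet
 * data, with padding so the next header lands on a 4-byte boundary
 * relative to the RXW_PADDING offset.
 */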
3673 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3674 int budget, int *work_done)
3675 {
3676 if (skb->len < RX_SKB_MIN_LEN)
3677 return 0;
3678
3679 /* Extract frames from the URB buffer and pass each one to
3680 * the stack in a new NAPI SKB.
3681 */
3682 while (skb->len > 0) {
3683 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3684 u16 rx_cmd_c;
3685 unsigned char *packet;
3686
3687 rx_cmd_a = get_unaligned_le32(skb->data);
3688 skb_pull(skb, sizeof(rx_cmd_a));
3689
3690 rx_cmd_b = get_unaligned_le32(skb->data);
3691 skb_pull(skb, sizeof(rx_cmd_b));
3692
3693 rx_cmd_c = get_unaligned_le16(skb->data);
3694 skb_pull(skb, sizeof(rx_cmd_c));
3695
3696 packet = skb->data;
3697
3698 /* get the packet length */
3699 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3700 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3701
3702 if (unlikely(size > skb->len)) {
3703 netif_dbg(dev, rx_err, dev->net,
3704 "size err rx_cmd_a=0x%08x\n",
3705 rx_cmd_a);
3706 return 0;
3707 }
3708
3709 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3710 netif_dbg(dev, rx_err, dev->net,
3711 "Error rx_cmd_a=0x%08x", rx_cmd_a);
3712 } else {
3713 u32 frame_len;
3714 struct sk_buff *skb2;
3715
3716 if (unlikely(size < ETH_FCS_LEN)) {
3717 netif_dbg(dev, rx_err, dev->net,
3718 "size err rx_cmd_a=0x%08x\n",
3719 rx_cmd_a);
3720 return 0;
3721 }
3722
3723 frame_len = size - ETH_FCS_LEN;
3724
3725 skb2 = napi_alloc_skb(&dev->napi, frame_len);
3726 if (!skb2)
3727 return 0;
3728
3729 memcpy(skb2->data, packet, frame_len);
3730
3731 skb_put(skb2, frame_len);
3732
3733 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3734 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3735
3736 /* Processing of the URB buffer must complete once
3737 * it has started. If the NAPI work budget is exhausted
3738 * while frames remain, they are added to the overflow
3739 * queue for delivery in the next NAPI polling cycle.
3740 */
3741 if (*work_done < budget) {
3742 lan78xx_skb_return(dev, skb2);
3743 ++(*work_done);
3744 } else {
3745 skb_queue_tail(&dev->rxq_overflow, skb2);
3746 }
3747 }
3748
3749 skb_pull(skb, size);
3750
3751 /* skip padding bytes before the next frame starts */
3752 if (skb->len)
3753 skb_pull(skb, align_count);
3754 }
3755
3756 return 1;
3757 }
3758
3759 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3760 int budget, int *work_done)
3761 {
3762 if (!lan78xx_rx(dev, skb, budget, work_done)) {
3763 netif_dbg(dev, rx_err, dev->net, "drop\n");
3764 dev->net->stats.rx_errors++;
3765 }
3766 }
3767
3768 static void rx_complete(struct urb *urb)
3769 {
3770 struct sk_buff *skb = (struct sk_buff *)urb->context;
3771 struct skb_data *entry = (struct skb_data *)skb->cb;
3772 struct lan78xx_net *dev = entry->dev;
3773 int urb_status = urb->status;
3774 enum skb_state state;
3775
3776 netif_dbg(dev, rx_status, dev->net,
3777 "rx done: status %d", urb->status);
3778
3779 skb_put(skb, urb->actual_length);
3780 state = rx_done;
3781
3782 if (urb != entry->urb)
3783 netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
3784
3785 switch (urb_status) {
3786 case 0:
3787 if (skb->len < RX_SKB_MIN_LEN) {
3788 state = rx_cleanup;
3789 dev->net->stats.rx_errors++;
3790 dev->net->stats.rx_length_errors++;
3791 netif_dbg(dev, rx_err, dev->net,
3792 "rx length %d\n", skb->len);
3793 }
3794 usb_mark_last_busy(dev->udev);
3795 break;
3796 case -EPIPE:
3797 dev->net->stats.rx_errors++;
3798 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3799 fallthrough;
3800 case -ECONNRESET: /* async unlink */
3801 case -ESHUTDOWN: /* hardware gone */
3802 netif_dbg(dev, ifdown, dev->net,
3803 "rx shutdown, code %d\n", urb_status);
3804 state = rx_cleanup;
3805 break;
3806 case -EPROTO:
3807 case -ETIME:
3808 case -EILSEQ:
3809 dev->net->stats.rx_errors++;
3810 state = rx_cleanup;
3811 break;
3812
3813 /* data overrun ... flush fifo? */
3814 case -EOVERFLOW:
3815 dev->net->stats.rx_over_errors++;
3816 fallthrough;
3817
3818 default:
3819 state = rx_cleanup;
3820 dev->net->stats.rx_errors++;
3821 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3822 break;
3823 }
3824
3825 state = defer_bh(dev, skb, &dev->rxq, state);
3826 }
3827
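/* Fill and submit a bulk-in URB for the given Rx buffer. The URB is
 * only submitted while the device is present, running and neither
 * halted nor asleep; on failure the buffer returns to the free pool.
 */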
3828 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
3829 {
3830 struct skb_data *entry = (struct skb_data *)skb->cb;
3831 size_t size = dev->rx_urb_size;
3832 struct urb *urb = entry->urb;
3833 unsigned long lockflags;
3834 int ret = 0;
3835
3836 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3837 skb->data, size, rx_complete, skb);
3838
3839 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3840
3841 if (netif_device_present(dev->net) &&
3842 netif_running(dev->net) &&
3843 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3844 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3845 ret = usb_submit_urb(urb, flags);
3846 switch (ret) {
3847 case 0:
3848 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3849 break;
3850 case -EPIPE:
3851 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3852 break;
3853 case -ENODEV:
3854 case -ENOENT:
3855 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3856 netif_device_detach(dev->net);
3857 break;
3858 case -EHOSTUNREACH:
3859 ret = -ENOLINK;
3860 napi_schedule(&dev->napi);
3861 break;
3862 default:
3863 netif_dbg(dev, rx_err, dev->net,
3864 "rx submit, %d\n", ret);
3865 napi_schedule(&dev->napi);
3866 break;
3867 }
3868 } else {
3869 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3870 ret = -ENOLINK;
3871 }
3872 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3873
3874 if (ret)
3875 lan78xx_release_rx_buf(dev, skb);
3876
3877 return ret;
3878 }
3879
3880 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3881 {
3882 struct sk_buff *rx_buf;
3883
3884 /* Ensure the maximum number of Rx URBs is submitted
3885 */
3886 while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3887 if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3888 break;
3889 }
3890 }
3891
3892 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
3893 struct sk_buff *rx_buf)
3894 {
3895 /* reset SKB data pointers */
3896
3897 rx_buf->data = rx_buf->head;
3898 skb_reset_tail_pointer(rx_buf);
3899 rx_buf->len = 0;
3900 rx_buf->data_len = 0;
3901
3902 rx_submit(dev, rx_buf, GFP_ATOMIC);
3903 }
3904
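/* Each frame placed in a Tx URB is preceded by two little-endian
 * 32-bit command words: TX_CMD_A carries the frame length and the
 * FCS, checksum-offload (IPE/TPE), LSO and VLAN-insertion (IVTG)
 * flags; TX_CMD_B carries the LSO MSS and the VLAN tag to insert.
 */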
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
{
	u32 tx_cmd_a;
	u32 tx_cmd_b;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	put_unaligned_le32(tx_cmd_a, buffer);
	put_unaligned_le32(tx_cmd_b, buffer + 4);
}

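/* Aggregate pending SKBs into a single Tx URB. Each frame is laid
 * out as an 8-byte command header followed by the frame data, and
 * every header is aligned to a TX_ALIGNMENT (4-byte) boundary:
 *
 *   [TX_CMD_A][TX_CMD_B][frame 0][pad][TX_CMD_A][TX_CMD_B][frame 1]...
 *
 * For example, a 61-byte frame occupies 8 + 61 = 69 bytes, so the
 * next header starts after (4 - (69 % 4)) % 4 = 3 bytes of padding.
 */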
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there is room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}

static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

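		/* A bulk transfer normally ends with a short packet. If the
		 * URB length is an exact multiple of the endpoint's maximum
		 * packet size there is no short packet, so request an
		 * explicit zero-length packet to mark the end of the
		 * transfer.
		 */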
		if (tx_buf->len % dev->maxpacket == 0) {
			/* request a zero-length packet */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}

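/* NAPI bottom half: deliver frames that overflowed the previous
 * cycle, process completed Rx URBs up to @budget and resubmit them,
 * then top up the free Rx URBs and kick the Tx path. Returns the
 * number of frames passed up to the network stack.
 */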
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */
		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */
		lan78xx_tx_bh(dev);
	}

	return work_done;
}

static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */
	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	return result;
}

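/* Deferred work item: runs in process context to handle the events
 * flagged by the URB completion handlers (EVENT_TX_HALT,
 * EVENT_RX_HALT, EVENT_LINK_RESET and EVENT_STAT_UPDATE), since
 * clearing a halt or touching device registers may sleep.
 */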
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);

		status = usb_clear_halt(dev->udev, dev->pipe_out);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}

	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			napi_schedule(&dev->napi);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		ret = lan78xx_link_reset(dev);
		if (ret < 0)
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}

	usb_autopm_put_interface(dev->intf);
}

static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ENODEV:		/* hardware gone */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE: not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case 0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}

static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	netif_napi_del(&dev->napi);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	timer_shutdown_sync(&dev->stat_monitor);
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	phydev = net->phydev;

	phy_disconnect(net->phydev);

	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}

static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}

static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
						struct net_device *netdev,
						netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	if (skb->len > LAN78XX_TSO_SIZE(dev))
		features &= ~NETIF_F_GSO_MASK;

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}

static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};

static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}

static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					 | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->mdiobus_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out5;
	}

	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_urbs;
	}

	usb_fill_int_urb(dev->urb_intr, dev->udev,
			 dev->pipe_intr, buf, maxp,
			 intr_complete, dev, period);
	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto free_urbs;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto free_urbs;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out8;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	/* The default autosuspend delay of 2 seconds costs more than it
	 * saves; use 10 seconds instead.
	 */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out8:
	phy_disconnect(netdev->phydev);
free_urbs:
	usb_free_urb(dev->urb_intr);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}

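/* Bit-serial CRC-16 over the given bytes: polynomial 0x8005, seeded
 * with 0xFFFF, consuming each byte LSB first. The result is
 * programmed into a WUF_CFGx register so the hardware can match
 * incoming wakeup frames against the same checksum.
 */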
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 crc16poly = 0x8005;
	int i;
	u16 bit, crc, msb;
	u8 data;

	crc = 0xFFFF;
	for (i = 0; i < len; i++) {
		data = *buf++;
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 15;
			crc <<= 1;

			if (msb ^ (u16)(data & 1)) {
				crc ^= crc16poly;
				crc |= (u16)0x0001U;
			}
			data >>= 1;
		}
	}

	return crc;
}

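/* Arm the device for USB selective (auto) suspend: stale wakeup
 * status is cleared, wake on PHY events and on receive-filter hits
 * (good frames) is enabled, and suspend mode 3 is selected. The Rx
 * path is restarted afterwards so wake frames can still be received.
 */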
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */
	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}

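/* Arm the device for system suspend according to the requested WoL
 * sources. Pattern-based wakeup uses the WUF_CFGx/WUF_MASKx filter
 * table: each enabled filter holds a CRC-16 (lan78xx_wakeframe_crc16)
 * of the bytes selected by its mask, which appears to act as a
 * byte-enable bitmap over the start of the frame. The values below
 * are consistent with that reading: 0x7 selects bytes 0-2 (the IPv4
 * multicast OUI 01:00:5e), 0x3 selects bytes 0-1 (33:33 for IPv6
 * multicast) and 0x3000 selects bytes 12-13 (the ARP EtherType
 * 0x0806).
 */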
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packet type (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}

static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}

static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
{
	bool pipe_halted = false;
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		struct sk_buff *skb = urb->context;
		int ret;

		if (!netif_device_present(dev->net) ||
		    !netif_carrier_ok(dev->net) ||
		    pipe_halted) {
			lan78xx_release_tx_buf(dev, skb);
			continue;
		}

		ret = usb_submit_urb(urb, GFP_ATOMIC);

		if (ret == 0) {
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, skb, tx_start);
		} else {
			if (ret == -EPIPE) {
				netif_stop_queue(dev->net);
				pipe_halted = true;
			} else if (ret == -ENODEV) {
				netif_device_detach(dev->net);
			}

			lan78xx_release_tx_buf(dev, skb);
		}
	}

	return pipe_halted;
}

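/* Resume from USB suspend: flush the Tx FIFO, resubmit the interrupt
 * URB, push out any Tx URBs that were deferred while the device was
 * asleep, restart the Tx path and NAPI polling, then clear and
 * disarm all wakeup sources.
 */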
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		napi_schedule(&dev->napi);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
				WUCSR2_ARP_RCD_ |
				WUCSR2_IPV6_TCPSYN_RCD_ |
				WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
				WUCSR_EEE_RX_WAKE_ |
				WUCSR_PFDA_FR_ |
				WUCSR_RFE_WAKE_FR_ |
				WUCSR_WUFR_ |
				WUCSR_MPR_ |
				WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}

static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	int ret;

	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");

	ret = lan78xx_reset(dev);
	if (ret < 0)
		return ret;

	phy_start(dev->net->phydev);

	ret = lan78xx_resume(intf);

	return ret;
}

static const struct usb_device_id products[] = {
	{
		/* LAN7800 USB Gigabit Ethernet Device */
		USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
		/* LAN7850 USB Gigabit Ethernet Device */
		USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
		/* LAN7801 USB Gigabit Ethernet Device */
		USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
		/* ATM2-AF USB Gigabit Ethernet Device */
		USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");