// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2001 Vojtech Pavlik
 *
 * CATC EL1210A NetMate USB Ethernet driver
 *
 * Sponsored by SuSE
 *
 * Based on the work of
 *	Donald Becker
 *
 * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002
 *    - adds support for Belkin F5U011
 */

/*
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>

#undef DEBUG

#include <linux/usb.h>

#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>"
#define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver"

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

static const char driver_name[] = "catc";

/*
 * Some defines.
 */

#define STATS_UPDATE	(HZ)	/* Time between stats updates */
#define TX_TIMEOUT	(5*HZ)	/* Max time the queue can be stopped */
#define PKT_SZ		1536	/* Max Ethernet packet size */
#define RX_MAX_BURST	15	/* Max packets per rx buffer (> 0, < 16) */
#define TX_MAX_BURST	15	/* Max full sized packets per tx buffer (> 0) */
#define CTRL_QUEUE	16	/* Max control requests in flight (power of two) */
#define RX_PKT_SZ	1600	/* Max size of receive packet for F5U011 */

/*
 * USB endpoints.
 */

enum catc_usb_ep {
	CATC_USB_EP_CONTROL = 0,
	CATC_USB_EP_BULK = 1,
	CATC_USB_EP_INT_IN = 2,
};

/*
 * Control requests.
 */

enum control_requests {
	ReadMem = 0xf1,
	GetMac = 0xf2,
	Reset = 0xf4,
	SetMac = 0xf5,
	SetRxMode = 0xf5,	/* F5U011 only */
	WriteROM = 0xf8,
	SetReg = 0xfa,
	GetReg = 0xfb,
	WriteMem = 0xfc,
	ReadROM = 0xfd,
};

/*
 * Registers.
 */

enum register_offsets {
	TxBufCount = 0x20,
	RxBufCount = 0x21,
	OpModes = 0x22,
	TxQed = 0x23,
	RxQed = 0x24,
	MaxBurst = 0x25,
	RxUnit = 0x60,
	EthStatus = 0x61,
	StationAddr0 = 0x67,
	EthStats = 0x69,
	LEDCtrl = 0x81,
};

enum eth_stats {
	TxSingleColl = 0x00,
	TxMultiColl = 0x02,
	TxExcessColl = 0x04,
	RxFramErr = 0x06,
};

enum op_mode_bits {
	Op3MemWaits = 0x03,
	OpLenInclude = 0x08,
	OpRxMerge = 0x10,
	OpTxMerge = 0x20,
	OpWin95bugfix = 0x40,
	OpLoopback = 0x80,
};

enum rx_filter_bits {
	RxEnable = 0x01,
	RxPolarity = 0x02,
	RxForceOK = 0x04,
	RxMultiCast = 0x08,
	RxPromisc = 0x10,
	AltRxPromisc = 0x20,	/* F5U011 uses different bit */
};

enum led_values {
	LEDFast = 0x01,
	LEDSlow = 0x02,
	LEDFlash = 0x03,
	LEDPulse = 0x04,
	LEDLink = 0x08,
};

enum link_status {
	LinkNoChange = 0,
	LinkGood = 1,
	LinkBad = 2
};

/*
 * The catc struct.
 */

#define CTRL_RUNNING	0
#define RX_RUNNING	1
#define TX_RUNNING	2

struct catc {
	struct net_device *netdev;
	struct usb_device *usbdev;

	unsigned long flags;

	unsigned int tx_ptr, tx_idx;
	unsigned int ctrl_head, ctrl_tail;
	spinlock_t tx_lock, ctrl_lock;

	u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)];
	u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)];
	u8 irq_buf[2];
	u8 ctrl_buf[64];
	struct usb_ctrlrequest ctrl_dr;

	struct timer_list timer;
	u8 stats_buf[8];
	u16 stats_vals[4];
	unsigned long last_stats;

	u8 multicast[64];

	struct ctrl_queue {
		u8 dir;
		u8 request;
		u16 value;
		u16 index;
		void *buf;
		int len;
		void (*callback)(struct catc *catc, struct ctrl_queue *q);
	} ctrl_queue[CTRL_QUEUE];

	struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb;

	u8 is_f5u011;		/* Set if device is an F5U011 */
	u8 rxmode[2];		/* Used for F5U011 */
	atomic_t recq_sz;	/* Used for F5U011 - counter of waiting rx packets */
};

/*
 * Useful macros.
 */

#define catc_get_mac(catc, mac) catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6)
#define catc_reset(catc) catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0)
#define catc_set_reg(catc, reg, val) catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0)
#define catc_get_reg(catc, reg, buf) catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1)
#define catc_write_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size)
#define catc_read_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size)

#define f5u011_rxmode(catc, rxmode) catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2)
#define f5u011_rxmode_async(catc, rxmode) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL)
#define f5u011_mchash_async(catc, hash) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL)

#define catc_set_reg_async(catc, reg, val) catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL)
#define catc_get_reg_async(catc, reg, cb) catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 1, cb)
#define catc_write_mem_async(catc, addr, buf, size) catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL)

/*
 * Receive routines.
 */

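/*
 * Bulk-in buffer layout, as the code below parses it: on the NetMate
 * chips each received frame is preceded by a 2-byte little-endian
 * length word and frames are padded so that the next one starts on a
 * 64-byte boundary, so several frames can share one URB.  The F5U011
 * apparently delivers exactly one raw frame per bulk transfer with no
 * length header, so urb->actual_length is used as the frame length.
 */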
static void catc_rx_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	u8 *pkt_start = urb->transfer_buffer;
	struct sk_buff *skb;
	int pkt_len, pkt_offset = 0;
	int status = urb->status;

	if (!catc->is_f5u011) {
		clear_bit(RX_RUNNING, &catc->flags);
		pkt_offset = 2;
	}

	if (status) {
		dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n",
			status, urb->actual_length);
		return;
	}

	do {
		if(!catc->is_f5u011) {
			pkt_len = le16_to_cpup((__le16*)pkt_start);
			if (pkt_len > urb->actual_length) {
				catc->netdev->stats.rx_length_errors++;
				catc->netdev->stats.rx_errors++;
				break;
			}
		} else {
			pkt_len = urb->actual_length;
		}

		if (!(skb = dev_alloc_skb(pkt_len)))
			return;

		skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
		skb_put(skb, pkt_len);

		skb->protocol = eth_type_trans(skb, catc->netdev);
		netif_rx(skb);

		catc->netdev->stats.rx_packets++;
		catc->netdev->stats.rx_bytes += pkt_len;

		/* F5U011 only does one packet per RX */
		if (catc->is_f5u011)
			break;
		pkt_start += (((pkt_len + 1) >> 6) + 1) << 6;

	} while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length);

	if (catc->is_f5u011) {
		if (atomic_read(&catc->recq_sz)) {
			int state;
			atomic_dec(&catc->recq_sz);
			netdev_dbg(catc->netdev, "getting extra packet\n");
			urb->dev = catc->usbdev;
			if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
				netdev_dbg(catc->netdev,
					   "submit(rx_urb) status %d\n", state);
			}
		} else {
			clear_bit(RX_RUNNING, &catc->flags);
		}
	}
}

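/*
 * The interrupt endpoint reports link state and whether bulk rx data is
 * pending.  On the NetMate, bits in data[1] carry "rx data available"
 * (0x80) and link good/bad (0x40/0x20); on the F5U011 the first two
 * bytes seem to encode a pending-packet count in the low 12 bits, with
 * link state signalled by 0x90/0xA0 in data[0].  If an rx URB is
 * already in flight on an F5U011, the extra notification is only
 * counted in recq_sz and the URB is resubmitted from catc_rx_done().
 */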
static void catc_irq_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	u8 *data = urb->transfer_buffer;
	int status = urb->status;
	unsigned int hasdata, linksts = LinkNoChange;
	int res;

	if (!catc->is_f5u011) {
		hasdata = data[1] & 0x80;
		if (data[1] & 0x40)
			linksts = LinkGood;
		else if (data[1] & 0x20)
			linksts = LinkBad;
	} else {
		hasdata = (unsigned int)(be16_to_cpup((__be16*)data) & 0x0fff);
		if (data[0] == 0x90)
			linksts = LinkGood;
		else if (data[0] == 0xA0)
			linksts = LinkBad;
	}

	switch (status) {
	case 0:			/* success */
		break;
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	/* -EPIPE: should clear the halt */
	default:		/* error */
		dev_dbg(&urb->dev->dev,
			"irq_done, status %d, data %02x %02x.\n",
			status, data[0], data[1]);
		goto resubmit;
	}

	if (linksts == LinkGood) {
		netif_carrier_on(catc->netdev);
		netdev_dbg(catc->netdev, "link ok\n");
	}

	if (linksts == LinkBad) {
		netif_carrier_off(catc->netdev);
		netdev_dbg(catc->netdev, "link bad\n");
	}

	if (hasdata) {
		if (test_and_set_bit(RX_RUNNING, &catc->flags)) {
			if (catc->is_f5u011)
				atomic_inc(&catc->recq_sz);
		} else {
			catc->rx_urb->dev = catc->usbdev;
			if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) {
				dev_err(&catc->usbdev->dev,
					"submit(rx_urb) status %d\n", res);
			}
		}
	}
resubmit:
	res = usb_submit_urb (urb, GFP_ATOMIC);
	if (res)
		dev_err(&catc->usbdev->dev,
			"can't resubmit intr, %s-%s, status %d\n",
			catc->usbdev->bus->bus_name,
			catc->usbdev->devpath, res);
}

/*
 * Transmit routines.
 */

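/*
 * Transmits are double-buffered: frames queued by catc_start_xmit()
 * accumulate in tx_buf[tx_idx] while the other half may still be on the
 * wire.  catc_tx_run() hands the filled half to the tx URB, flips
 * tx_idx and resets tx_ptr; catc_tx_done() kicks off the next half if
 * anything was queued in the meantime.
 */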
static int catc_tx_run(struct catc *catc)
{
	int status;

	if (catc->is_f5u011)
		catc->tx_ptr = (catc->tx_ptr + 63) & ~63;

	catc->tx_urb->transfer_buffer_length = catc->tx_ptr;
	catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx];
	catc->tx_urb->dev = catc->usbdev;

	if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0)
		dev_err(&catc->usbdev->dev, "submit(tx_urb), status %d\n",
			status);

	catc->tx_idx = !catc->tx_idx;
	catc->tx_ptr = 0;

	netif_trans_update(catc->netdev);
	return status;
}

static void catc_tx_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	unsigned long flags;
	int r, status = urb->status;

	if (status == -ECONNRESET) {
		dev_dbg(&urb->dev->dev, "Tx Reset.\n");
		urb->status = 0;
		netif_trans_update(catc->netdev);
		catc->netdev->stats.tx_errors++;
		clear_bit(TX_RUNNING, &catc->flags);
		netif_wake_queue(catc->netdev);
		return;
	}

	if (status) {
		dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n",
			status, urb->actual_length);
		return;
	}

	spin_lock_irqsave(&catc->tx_lock, flags);

	if (catc->tx_ptr) {
		r = catc_tx_run(catc);
		if (unlikely(r < 0))
			clear_bit(TX_RUNNING, &catc->flags);
	} else {
		clear_bit(TX_RUNNING, &catc->flags);
	}

	netif_wake_queue(catc->netdev);

	spin_unlock_irqrestore(&catc->tx_lock, flags);
}

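/*
 * Each queued frame is stored as a 2-byte length header (big-endian on
 * the F5U011, little-endian otherwise) followed by the frame data, with
 * tx_ptr rounded up to the next 64-byte boundary before a new frame is
 * appended.  The queue is stopped once the buffer cannot be guaranteed
 * to hold another full-sized frame (or, on the F5U011, whenever any
 * frame is pending at all).
 */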
static netdev_tx_t catc_start_xmit(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);
	unsigned long flags;
	int r = 0;
	char *tx_buf;

	spin_lock_irqsave(&catc->tx_lock, flags);

	catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6;
	tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr;
	if (catc->is_f5u011)
		*(__be16 *)tx_buf = cpu_to_be16(skb->len);
	else
		*(__le16 *)tx_buf = cpu_to_le16(skb->len);
	skb_copy_from_linear_data(skb, tx_buf + 2, skb->len);
	catc->tx_ptr += skb->len + 2;

	if (!test_and_set_bit(TX_RUNNING, &catc->flags)) {
		r = catc_tx_run(catc);
		if (r < 0)
			clear_bit(TX_RUNNING, &catc->flags);
	}

	if ((catc->is_f5u011 && catc->tx_ptr) ||
	    (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2))))
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&catc->tx_lock, flags);

	if (r >= 0) {
		catc->netdev->stats.tx_bytes += skb->len;
		catc->netdev->stats.tx_packets++;
	}

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void catc_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct catc *catc = netdev_priv(netdev);

	dev_warn(&netdev->dev, "Transmit timed out.\n");
	usb_unlink_urb(catc->tx_urb);
}

/*
 * Control messages.
 */

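/*
 * All device configuration goes through vendor-specific control
 * requests (bRequestType 0x40 | direction).  catc_ctrl_msg() is the
 * blocking variant; the async variant below queues requests into a
 * small ring (ctrl_queue) so they can be issued from atomic context,
 * with completions serviced one at a time from catc_ctrl_done().
 */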
static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len)
{
	int retval = usb_control_msg(catc->usbdev,
		dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0),
		request, 0x40 | dir, value, index, buf, len, 1000);
	return retval < 0 ? retval : 0;
}

static void catc_ctrl_run(struct catc *catc)
{
	struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
	struct usb_device *usbdev = catc->usbdev;
	struct urb *urb = catc->ctrl_urb;
	struct usb_ctrlrequest *dr = &catc->ctrl_dr;
	int status;

	dr->bRequest = q->request;
	dr->bRequestType = 0x40 | q->dir;
	dr->wValue = cpu_to_le16(q->value);
	dr->wIndex = cpu_to_le16(q->index);
	dr->wLength = cpu_to_le16(q->len);

	urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0);
	urb->transfer_buffer_length = q->len;
	urb->transfer_buffer = catc->ctrl_buf;
	urb->setup_packet = (void *) dr;
	urb->dev = usbdev;

	if (!q->dir && q->buf && q->len)
		memcpy(catc->ctrl_buf, q->buf, q->len);

	if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
		dev_err(&catc->usbdev->dev, "submit(ctrl_urb) status %d\n",
			status);
}

static void catc_ctrl_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	struct ctrl_queue *q;
	unsigned long flags;
	int status = urb->status;

	if (status)
		dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n",
			status, urb->actual_length);

	spin_lock_irqsave(&catc->ctrl_lock, flags);

	q = catc->ctrl_queue + catc->ctrl_tail;

	if (q->dir) {
		if (q->buf && q->len)
			memcpy(q->buf, catc->ctrl_buf, q->len);
		else
			q->buf = catc->ctrl_buf;
	}

	if (q->callback)
		q->callback(catc, q);

	catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);

	if (catc->ctrl_head != catc->ctrl_tail)
		catc_ctrl_run(catc);
	else
		clear_bit(CTRL_RUNNING, &catc->flags);

	spin_unlock_irqrestore(&catc->ctrl_lock, flags);
}

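/*
 * ctrl_head/ctrl_tail index a power-of-two ring, so wrapping is a
 * simple mask with CTRL_QUEUE - 1.  If the ring fills up, the tail is
 * advanced, the oldest unserviced request is dropped and -1 is
 * returned; callers in this driver ignore the return value, so such a
 * request is silently lost.
 */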
static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value,
	u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q))
{
	struct ctrl_queue *q;
	int retval = 0;
	unsigned long flags;

	spin_lock_irqsave(&catc->ctrl_lock, flags);

	q = catc->ctrl_queue + catc->ctrl_head;

	q->dir = dir;
	q->request = request;
	q->value = value;
	q->index = index;
	q->buf = buf;
	q->len = len;
	q->callback = callback;

	catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1);

	if (catc->ctrl_head == catc->ctrl_tail) {
		dev_err(&catc->usbdev->dev, "ctrl queue full\n");
		catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);
		retval = -1;
	}

	if (!test_and_set_bit(CTRL_RUNNING, &catc->flags))
		catc_ctrl_run(catc);

	spin_unlock_irqrestore(&catc->ctrl_lock, flags);

	return retval;
}

/*
 * Statistics.
 */

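/*
 * The adapter keeps four 16-bit error counters in registers EthStats
 * through EthStats + 7.  catc_stats_timer() reads them one byte at a
 * time via async control requests, in descending register order so the
 * low (odd) byte is already cached in stats_buf when its high (even)
 * partner arrives; catc_stats_done() then reassembles each pair and
 * folds the delta since the last poll into the netdev stats.
 */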
static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
{
	int index = q->index - EthStats;
	u16 data, last;

	catc->stats_buf[index] = *((char *)q->buf);

	if (index & 1)
		return;

	data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1];
	last = catc->stats_vals[index >> 1];

	switch (index) {
	case TxSingleColl:
	case TxMultiColl:
		catc->netdev->stats.collisions += data - last;
		break;
	case TxExcessColl:
		catc->netdev->stats.tx_aborted_errors += data - last;
		catc->netdev->stats.tx_errors += data - last;
		break;
	case RxFramErr:
		catc->netdev->stats.rx_frame_errors += data - last;
		catc->netdev->stats.rx_errors += data - last;
		break;
	}

	catc->stats_vals[index >> 1] = data;
}

static void catc_stats_timer(struct timer_list *t)
{
	struct catc *catc = timer_container_of(catc, t, timer);
	int i;

	for (i = 0; i < 8; i++)
		catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done);

	mod_timer(&catc->timer, jiffies + STATS_UPDATE);
}

/*
 * Receive modes. Broadcast, Multicast, Promisc.
 */

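/*
 * The NetMate multicast filter is a 512-bit hash table stored in the
 * 64-byte multicast[] array: bits 0-8 of the little-endian CRC of the
 * address select the bit to set.  The F5U011 path below instead appears
 * to use a 64-bit hash built from the top CRC bits, uploaded with the
 * SetRxMode request.
 */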
static void catc_multicast(const unsigned char *addr, u8 *multicast)
{
	u32 crc;

	crc = ether_crc_le(6, addr);
	multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
}

static void catc_set_multicast_list(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	u8 broadcast[ETH_ALEN];
	u8 rx = RxEnable | RxPolarity | RxMultiCast;

	eth_broadcast_addr(broadcast);
	memset(catc->multicast, 0, 64);

	catc_multicast(broadcast, catc->multicast);
	catc_multicast(netdev->dev_addr, catc->multicast);

	if (netdev->flags & IFF_PROMISC) {
		memset(catc->multicast, 0xff, 64);
		rx |= (!catc->is_f5u011) ? RxPromisc : AltRxPromisc;
	}

	if (netdev->flags & IFF_ALLMULTI) {
		memset(catc->multicast, 0xff, 64);
	} else {
		netdev_for_each_mc_addr(ha, netdev) {
			u32 crc = ether_crc_le(6, ha->addr);
			if (!catc->is_f5u011) {
				catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
			} else {
				catc->multicast[7-(crc >> 29)] |= 1 << ((crc >> 26) & 7);
			}
		}
	}
	if (!catc->is_f5u011) {
		catc_set_reg_async(catc, RxUnit, rx);
		catc_write_mem_async(catc, 0xfa80, catc->multicast, 64);
	} else {
		f5u011_mchash_async(catc, catc->multicast);
		if (catc->rxmode[0] != rx) {
			catc->rxmode[0] = rx;
			netdev_dbg(catc->netdev,
				   "Setting RX mode to %2.2X %2.2X\n",
				   catc->rxmode[0], catc->rxmode[1]);
			f5u011_rxmode_async(catc, catc->rxmode);
		}
	}
}

static void catc_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct catc *catc = netdev_priv(dev);
	strscpy(info->driver, driver_name, sizeof(info->driver));
	usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}

static int catc_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct catc *catc = netdev_priv(dev);
	if (!catc->is_f5u011)
		return -EOPNOTSUPP;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);

	cmd->base.speed = SPEED_10;
	cmd->base.duplex = DUPLEX_HALF;
	cmd->base.port = PORT_TP;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_DISABLE;

	return 0;
}

static const struct ethtool_ops ops = {
	.get_drvinfo = catc_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = catc_get_link_ksettings,
};

/*
 * Open, close.
 */

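/*
 * Only the interrupt URB (and, on the NetMate, the stats timer) is
 * started here; rx and tx URBs are submitted on demand from
 * catc_irq_done() and catc_start_xmit().
 */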
static int catc_open(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);
	int status;

	catc->irq_urb->dev = catc->usbdev;
	if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) {
		dev_err(&catc->usbdev->dev, "submit(irq_urb) status %d\n",
			status);
		return -1;
	}

	netif_start_queue(netdev);

	if (!catc->is_f5u011)
		mod_timer(&catc->timer, jiffies + STATS_UPDATE);

	return 0;
}

static int catc_stop(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);

	netif_stop_queue(netdev);

	if (!catc->is_f5u011)
		timer_delete_sync(&catc->timer);

	usb_kill_urb(catc->rx_urb);
	usb_kill_urb(catc->tx_urb);
	usb_kill_urb(catc->irq_urb);
	usb_kill_urb(catc->ctrl_urb);

	return 0;
}

static const struct net_device_ops catc_netdev_ops = {
	.ndo_open		= catc_open,
	.ndo_stop		= catc_stop,
	.ndo_start_xmit		= catc_start_xmit,

	.ndo_tx_timeout		= catc_tx_timeout,
	.ndo_set_rx_mode	= catc_set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * USB probe, disconnect.
 */

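/*
 * Probe selects altsetting 1, verifies that the expected bulk and
 * interrupt endpoints are actually present before any URBs are filled,
 * and distinguishes the Belkin F5U011 from the NetMate by its bcdDevice
 * of 0x0130 (same vendor/product IDs) so the chip can be programmed
 * accordingly.
 */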
static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct device *dev = &intf->dev;
	struct usb_device *usbdev = interface_to_usbdev(intf);
	struct net_device *netdev;
	struct catc *catc;
	u8 broadcast[ETH_ALEN];
	u8 *macbuf;
	int pktsz, ret = -ENOMEM;
	static const u8 bulk_ep_addr[] = {
		CATC_USB_EP_BULK | USB_DIR_OUT,
		CATC_USB_EP_BULK | USB_DIR_IN,
		0};
	static const u8 int_ep_addr[] = {
		CATC_USB_EP_INT_IN | USB_DIR_IN,
		0};

	macbuf = kmalloc(ETH_ALEN, GFP_KERNEL);
	if (!macbuf)
		goto error;

	if (usb_set_interface(usbdev,
			intf->altsetting->desc.bInterfaceNumber, 1)) {
		dev_err(dev, "Can't set altsetting 1.\n");
		ret = -EIO;
		goto fail_mem;
	}

	/* Verify that all required endpoints are present */
	if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
	    !usb_check_int_endpoints(intf, int_ep_addr)) {
		dev_err(dev, "Missing or invalid endpoints\n");
		ret = -ENODEV;
		goto fail_mem;
	}

	netdev = alloc_etherdev(sizeof(struct catc));
	if (!netdev)
		goto fail_mem;

	catc = netdev_priv(netdev);

	netdev->netdev_ops = &catc_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	netdev->ethtool_ops = &ops;

	catc->usbdev = usbdev;
	catc->netdev = netdev;

	spin_lock_init(&catc->tx_lock);
	spin_lock_init(&catc->ctrl_lock);

	timer_setup(&catc->timer, catc_stats_timer, 0);

	catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
	catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
	if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
	    (!catc->rx_urb) || (!catc->irq_urb)) {
		dev_err(&intf->dev, "No free urbs available.\n");
		ret = -ENOMEM;
		goto fail_free;
	}

	/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
	if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
	    le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
	    le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
		dev_dbg(dev, "Testing for f5u011\n");
		catc->is_f5u011 = 1;
		atomic_set(&catc->recq_sz, 0);
		pktsz = RX_PKT_SZ;
	} else {
		pktsz = RX_MAX_BURST * (PKT_SZ + 2);
	}

	usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
		NULL, NULL, 0, catc_ctrl_done, catc);

	usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, CATC_USB_EP_BULK),
		NULL, 0, catc_tx_done, catc);

	usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, CATC_USB_EP_BULK),
		catc->rx_buf, pktsz, catc_rx_done, catc);

	usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, CATC_USB_EP_INT_IN),
		catc->irq_buf, 2, catc_irq_done, catc, 1);

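	/*
	 * On the NetMate, detect the amount of on-board packet memory by
	 * writing distinct patterns at 0x7a80 and 0xfa80 (addresses that
	 * alias each other on 32k parts) and reading the first one back;
	 * buffer counts are then sized for 64k or 32k, with anything else
	 * treated as 32k.  The F5U011 is instead simply reset and has its
	 * rx mode programmed directly.
	 */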
	if (!catc->is_f5u011) {
		u32 *buf;
		int i;

		dev_dbg(dev, "Checking memory size\n");

		buf = kmalloc(4, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto fail_free;
		}

		*buf = 0x12345678;
		catc_write_mem(catc, 0x7a80, buf, 4);
		*buf = 0x87654321;
		catc_write_mem(catc, 0xfa80, buf, 4);
		catc_read_mem(catc, 0x7a80, buf, 4);

		switch (*buf) {
		case 0x12345678:
			catc_set_reg(catc, TxBufCount, 8);
			catc_set_reg(catc, RxBufCount, 32);
			dev_dbg(dev, "64k Memory\n");
			break;
		default:
			dev_warn(&intf->dev,
				 "Couldn't detect memory size, assuming 32k\n");
			fallthrough;
		case 0x87654321:
			catc_set_reg(catc, TxBufCount, 4);
			catc_set_reg(catc, RxBufCount, 16);
			dev_dbg(dev, "32k Memory\n");
			break;
		}

		kfree(buf);

		dev_dbg(dev, "Getting MAC from SEEROM.\n");

		catc_get_mac(catc, macbuf);
		eth_hw_addr_set(netdev, macbuf);

		dev_dbg(dev, "Setting MAC into registers.\n");

		for (i = 0; i < 6; i++)
			catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);

		dev_dbg(dev, "Filling the multicast list.\n");

		eth_broadcast_addr(broadcast);
		catc_multicast(broadcast, catc->multicast);
		catc_multicast(netdev->dev_addr, catc->multicast);
		catc_write_mem(catc, 0xfa80, catc->multicast, 64);

		dev_dbg(dev, "Clearing error counters.\n");

		for (i = 0; i < 8; i++)
			catc_set_reg(catc, EthStats + i, 0);
		catc->last_stats = jiffies;

		dev_dbg(dev, "Enabling.\n");

		catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
		catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
		catc_set_reg(catc, LEDCtrl, LEDLink);
		catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
	} else {
		dev_dbg(dev, "Performing reset\n");
		catc_reset(catc);
		catc_get_mac(catc, macbuf);
		eth_hw_addr_set(netdev, macbuf);

		dev_dbg(dev, "Setting RX Mode\n");
		catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
		catc->rxmode[1] = 0;
		f5u011_rxmode(catc, catc->rxmode);
	}
	dev_dbg(dev, "Init done.\n");
	printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
	       netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
	       usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
	usb_set_intfdata(intf, catc);

	SET_NETDEV_DEV(netdev, &intf->dev);
	ret = register_netdev(netdev);
	if (ret)
		goto fail_clear_intfdata;

	kfree(macbuf);
	return 0;

fail_clear_intfdata:
	usb_set_intfdata(intf, NULL);
fail_free:
	usb_free_urb(catc->ctrl_urb);
	usb_free_urb(catc->tx_urb);
	usb_free_urb(catc->rx_urb);
	usb_free_urb(catc->irq_urb);
	free_netdev(netdev);
fail_mem:
	kfree(macbuf);
error:
	return ret;
}

static void catc_disconnect(struct usb_interface *intf)
{
	struct catc *catc = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	if (catc) {
		unregister_netdev(catc->netdev);
		usb_free_urb(catc->ctrl_urb);
		usb_free_urb(catc->tx_urb);
		usb_free_urb(catc->rx_urb);
		usb_free_urb(catc->irq_urb);
		free_netdev(catc->netdev);
	}
}

/*
 * Module functions and tables.
 */

static const struct usb_device_id catc_id_table[] = {
	{ USB_DEVICE(0x0423, 0xa) },	/* CATC Netmate, Belkin F5U011 */
	{ USB_DEVICE(0x0423, 0xc) },	/* CATC Netmate II, Belkin F5U111 */
	{ USB_DEVICE(0x08d1, 0x1) },	/* smartBridges smartNIC */
	{ }
};

MODULE_DEVICE_TABLE(usb, catc_id_table);

static struct usb_driver catc_driver = {
	.name =		driver_name,
	.probe =	catc_probe,
	.disconnect =	catc_disconnect,
	.id_table =	catc_id_table,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(catc_driver);