xref: /linux/drivers/net/usb/catc.c (revision 97733180fafbeb7cc3fd1c8be60d05980615f5d6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Copyright (c) 2001 Vojtech Pavlik
4  *
5  *  CATC EL1210A NetMate USB Ethernet driver
6  *
7  *  Sponsored by SuSE
8  *
9  *  Based on the work of
10  *		Donald Becker
11  *
12  *  Old chipset support added by Simon Evans <spse@secret.org.uk> 2002
13  *    - adds support for Belkin F5U011
14  */
15 
16 /*
17  *
18  * Should you need to contact me, the author, you can do so either by
19  * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
20  * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
21  */
22 
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/string.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/spinlock.h>
30 #include <linux/ethtool.h>
31 #include <linux/crc32.h>
32 #include <linux/bitops.h>
33 #include <linux/gfp.h>
34 #include <linux/uaccess.h>
35 
36 #undef DEBUG
37 
38 #include <linux/usb.h>
39 
40 /*
41  * Version information.
42  */
43 
44 #define DRIVER_VERSION "v2.8"
45 #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>"
46 #define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver"
47 #define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet"
48 
49 MODULE_AUTHOR(DRIVER_AUTHOR);
50 MODULE_DESCRIPTION(DRIVER_DESC);
51 MODULE_LICENSE("GPL");
52 
53 static const char driver_name[] = "catc";
54 
55 /*
56  * Some defines.
57  */
58 
59 #define STATS_UPDATE		(HZ)	/* Time between stats updates */
60 #define TX_TIMEOUT		(5*HZ)	/* Max time the queue can be stopped */
61 #define PKT_SZ			1536	/* Max Ethernet packet size */
62 #define RX_MAX_BURST		15	/* Max packets per rx buffer (> 0, < 16) */
63 #define TX_MAX_BURST		15	/* Max full sized packets per tx buffer (> 0) */
64 #define CTRL_QUEUE		16	/* Max control requests in flight (power of two) */
65 #define RX_PKT_SZ		1600	/* Max size of receive packet for F5U011 */
66 
67 /*
68  * Control requests.
69  */
70 
71 enum control_requests {
72 	ReadMem =	0xf1,
73 	GetMac =	0xf2,
74 	Reset =		0xf4,
75 	SetMac =	0xf5,
76 	SetRxMode =     0xf5,  /* F5U011 only */
77 	WriteROM =	0xf8,
78 	SetReg =	0xfa,
79 	GetReg =	0xfb,
80 	WriteMem =	0xfc,
81 	ReadROM =	0xfd,
82 };
83 
84 /*
85  * Registers.
86  */
87 
88 enum register_offsets {
89 	TxBufCount =	0x20,
90 	RxBufCount =	0x21,
91 	OpModes =	0x22,
92 	TxQed =		0x23,
93 	RxQed =		0x24,
94 	MaxBurst =	0x25,
95 	RxUnit =	0x60,
96 	EthStatus =	0x61,
97 	StationAddr0 =	0x67,
98 	EthStats =	0x69,
99 	LEDCtrl =	0x81,
100 };
101 
102 enum eth_stats {
103 	TxSingleColl =	0x00,
104 	TxMultiColl =	0x02,
105 	TxExcessColl =	0x04,
106 	RxFramErr =	0x06,
107 };
108 
109 enum op_mode_bits {
110 	Op3MemWaits =	0x03,
111 	OpLenInclude =	0x08,
112 	OpRxMerge =	0x10,
113 	OpTxMerge =	0x20,
114 	OpWin95bugfix =	0x40,
115 	OpLoopback =	0x80,
116 };
117 
118 enum rx_filter_bits {
119 	RxEnable =	0x01,
120 	RxPolarity =	0x02,
121 	RxForceOK =	0x04,
122 	RxMultiCast =	0x08,
123 	RxPromisc =	0x10,
124 	AltRxPromisc =  0x20, /* F5U011 uses different bit */
125 };
126 
127 enum led_values {
128 	LEDFast = 	0x01,
129 	LEDSlow =	0x02,
130 	LEDFlash =	0x03,
131 	LEDPulse =	0x04,
132 	LEDLink =	0x08,
133 };
134 
135 enum link_status {
136 	LinkNoChange = 0,
137 	LinkGood     = 1,
138 	LinkBad      = 2
139 };
140 
141 /*
142  * The catc struct.
143  */
144 
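/* Bit numbers for catc->flags, manipulated with the atomic bitops. */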
145 #define CTRL_RUNNING	0
146 #define RX_RUNNING	1
147 #define TX_RUNNING	2
148 
149 struct catc {
150 	struct net_device *netdev;
151 	struct usb_device *usbdev;
152 
153 	unsigned long flags;
154 
155 	unsigned int tx_ptr, tx_idx;
156 	unsigned int ctrl_head, ctrl_tail;
157 	spinlock_t tx_lock, ctrl_lock;
158 
159 	u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)];
160 	u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)];
161 	u8 irq_buf[2];
162 	u8 ctrl_buf[64];
163 	struct usb_ctrlrequest ctrl_dr;
164 
165 	struct timer_list timer;
166 	u8 stats_buf[8];
167 	u16 stats_vals[4];
168 	unsigned long last_stats;
169 
170 	u8 multicast[64];
171 
172 	struct ctrl_queue {
173 		u8 dir;
174 		u8 request;
175 		u16 value;
176 		u16 index;
177 		void *buf;
178 		int len;
179 		void (*callback)(struct catc *catc, struct ctrl_queue *q);
180 	} ctrl_queue[CTRL_QUEUE];
181 
182 	struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb;
183 
184 	u8 is_f5u011;	/* Set if device is an F5U011 */
185 	u8 rxmode[2];	/* Used for F5U011 */
186 	atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */
187 };
188 
189 /*
190  * Useful macros.
191  */
192 
193 #define catc_get_mac(catc, mac)				catc_ctrl_msg(catc, USB_DIR_IN,  GetMac, 0, 0, mac,  6)
194 #define catc_reset(catc)				catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0)
195 #define catc_set_reg(catc, reg, val)			catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0)
196 #define catc_get_reg(catc, reg, buf)			catc_ctrl_msg(catc, USB_DIR_IN,  GetReg, 0, reg, buf, 1)
197 #define catc_write_mem(catc, addr, buf, size)		catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size)
198 #define catc_read_mem(catc, addr, buf, size)		catc_ctrl_msg(catc, USB_DIR_IN,  ReadMem, 0, addr, buf, size)
199 
200 #define f5u011_rxmode(catc, rxmode)			catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2)
201 #define f5u011_rxmode_async(catc, rxmode)		catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL)
202 #define f5u011_mchash_async(catc, hash)			catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL)
203 
204 #define catc_set_reg_async(catc, reg, val)		catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL)
205 #define catc_get_reg_async(catc, reg, cb)		catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 1, cb)
206 #define catc_write_mem_async(catc, addr, buf, size)	catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL)
207 
208 /*
209  * Receive routines.
210  */
211 
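/*
 * On the EL1210A a single bulk transfer may carry several frames: each
 * frame starts on a 64-byte boundary and is preceded by a little-endian
 * length word.  The F5U011 delivers exactly one frame per transfer,
 * with no length prefix.
 */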
212 static void catc_rx_done(struct urb *urb)
213 {
214 	struct catc *catc = urb->context;
215 	u8 *pkt_start = urb->transfer_buffer;
216 	struct sk_buff *skb;
217 	int pkt_len, pkt_offset = 0;
218 	int status = urb->status;
219 
220 	if (!catc->is_f5u011) {
221 		clear_bit(RX_RUNNING, &catc->flags);
222 		pkt_offset = 2;
223 	}
224 
225 	if (status) {
226 		dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n",
227 			status, urb->actual_length);
228 		return;
229 	}
230 
231 	do {
232 		if (!catc->is_f5u011) {
233 			pkt_len = le16_to_cpup((__le16 *)pkt_start);
234 			if (pkt_len > urb->actual_length) {
235 				catc->netdev->stats.rx_length_errors++;
236 				catc->netdev->stats.rx_errors++;
237 				break;
238 			}
239 		} else {
240 			pkt_len = urb->actual_length;
241 		}
242 
243 		if (!(skb = dev_alloc_skb(pkt_len)))
244 			return;
245 
246 		skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
247 		skb_put(skb, pkt_len);
248 
249 		skb->protocol = eth_type_trans(skb, catc->netdev);
250 		netif_rx(skb);
251 
252 		catc->netdev->stats.rx_packets++;
253 		catc->netdev->stats.rx_bytes += pkt_len;
254 
255 		/* F5U011 only does one packet per RX */
256 		if (catc->is_f5u011)
257 			break;
258 		pkt_start += (((pkt_len + 1) >> 6) + 1) << 6;
259 
260 	} while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length);
261 
262 	if (catc->is_f5u011) {
263 		if (atomic_read(&catc->recq_sz)) {
264 			int state;
265 			atomic_dec(&catc->recq_sz);
266 			netdev_dbg(catc->netdev, "getting extra packet\n");
267 			urb->dev = catc->usbdev;
268 			if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
269 				netdev_dbg(catc->netdev,
270 					   "submit(rx_urb) status %d\n", state);
271 			}
272 		} else {
273 			clear_bit(RX_RUNNING, &catc->flags);
274 		}
275 	}
276 }
277 
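/*
 * The interrupt endpoint reports link changes and whether received data
 * is waiting on the device.  If data is pending and the bulk RX URB is
 * idle, it is submitted from here; on the F5U011, additional pending
 * packets are counted in recq_sz and drained by catc_rx_done().
 */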
278 static void catc_irq_done(struct urb *urb)
279 {
280 	struct catc *catc = urb->context;
281 	u8 *data = urb->transfer_buffer;
282 	int status = urb->status;
283 	unsigned int hasdata = 0, linksts = LinkNoChange;
284 	int res;
285 
286 	if (!catc->is_f5u011) {
287 		hasdata = data[1] & 0x80;
288 		if (data[1] & 0x40)
289 			linksts = LinkGood;
290 		else if (data[1] & 0x20)
291 			linksts = LinkBad;
292 	} else {
293 		hasdata = (unsigned int)(be16_to_cpup((__be16 *)data) & 0x0fff);
294 		if (data[0] == 0x90)
295 			linksts = LinkGood;
296 		else if (data[0] == 0xA0)
297 			linksts = LinkBad;
298 	}
299 
300 	switch (status) {
301 	case 0:			/* success */
302 		break;
303 	case -ECONNRESET:	/* unlink */
304 	case -ENOENT:
305 	case -ESHUTDOWN:
306 		return;
307 	/* -EPIPE:  should clear the halt */
308 	default:		/* error */
309 		dev_dbg(&urb->dev->dev,
310 			"irq_done, status %d, data %02x %02x.\n",
311 			status, data[0], data[1]);
312 		goto resubmit;
313 	}
314 
315 	if (linksts == LinkGood) {
316 		netif_carrier_on(catc->netdev);
317 		netdev_dbg(catc->netdev, "link ok\n");
318 	}
319 
320 	if (linksts == LinkBad) {
321 		netif_carrier_off(catc->netdev);
322 		netdev_dbg(catc->netdev, "link bad\n");
323 	}
324 
325 	if (hasdata) {
326 		if (test_and_set_bit(RX_RUNNING, &catc->flags)) {
327 			if (catc->is_f5u011)
328 				atomic_inc(&catc->recq_sz);
329 		} else {
330 			catc->rx_urb->dev = catc->usbdev;
331 			if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) {
332 				dev_err(&catc->usbdev->dev,
333 					"submit(rx_urb) status %d\n", res);
334 			}
335 		}
336 	}
337 resubmit:
338 	res = usb_submit_urb(urb, GFP_ATOMIC);
339 	if (res)
340 		dev_err(&catc->usbdev->dev,
341 			"can't resubmit intr, %s-%s, status %d\n",
342 			catc->usbdev->bus->bus_name,
343 			catc->usbdev->devpath, res);
344 }
345 
346 /*
347  * Transmit routines.
348  */
349 
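/*
 * Transmission is double-buffered: catc_start_xmit() packs frames (each
 * preceded by a 16-bit length word) into the buffer that is not in
 * flight, and catc_tx_done() starts the next transfer if anything has
 * queued up in the meantime.
 */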
350 static int catc_tx_run(struct catc *catc)
351 {
352 	int status;
353 
354 	if (catc->is_f5u011)
355 		catc->tx_ptr = (catc->tx_ptr + 63) & ~63;
356 
357 	catc->tx_urb->transfer_buffer_length = catc->tx_ptr;
358 	catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx];
359 	catc->tx_urb->dev = catc->usbdev;
360 
361 	if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0)
362 		dev_err(&catc->usbdev->dev, "submit(tx_urb), status %d\n",
363 			status);
364 
365 	catc->tx_idx = !catc->tx_idx;
366 	catc->tx_ptr = 0;
367 
368 	netif_trans_update(catc->netdev);
369 	return status;
370 }
371 
372 static void catc_tx_done(struct urb *urb)
373 {
374 	struct catc *catc = urb->context;
375 	unsigned long flags;
376 	int r, status = urb->status;
377 
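	/* -ECONNRESET means the URB was unlinked, e.g. by catc_tx_timeout(). */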
378 	if (status == -ECONNRESET) {
379 		dev_dbg(&urb->dev->dev, "Tx Reset.\n");
380 		urb->status = 0;
381 		netif_trans_update(catc->netdev);
382 		catc->netdev->stats.tx_errors++;
383 		clear_bit(TX_RUNNING, &catc->flags);
384 		netif_wake_queue(catc->netdev);
385 		return;
386 	}
387 
388 	if (status) {
389 		dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n",
390 			status, urb->actual_length);
391 		return;
392 	}
393 
394 	spin_lock_irqsave(&catc->tx_lock, flags);
395 
396 	if (catc->tx_ptr) {
397 		r = catc_tx_run(catc);
398 		if (unlikely(r < 0))
399 			clear_bit(TX_RUNNING, &catc->flags);
400 	} else {
401 		clear_bit(TX_RUNNING, &catc->flags);
402 	}
403 
404 	netif_wake_queue(catc->netdev);
405 
406 	spin_unlock_irqrestore(&catc->tx_lock, flags);
407 }
408 
409 static netdev_tx_t catc_start_xmit(struct sk_buff *skb,
410 					 struct net_device *netdev)
411 {
412 	struct catc *catc = netdev_priv(netdev);
413 	unsigned long flags;
414 	int r = 0;
415 	char *tx_buf;
416 
417 	spin_lock_irqsave(&catc->tx_lock, flags);
418 
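	/* Align the write pointer to a 64-byte boundary before appending. */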
419 	catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6;
420 	tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr;
421 	if (catc->is_f5u011)
422 		*(__be16 *)tx_buf = cpu_to_be16(skb->len);
423 	else
424 		*(__le16 *)tx_buf = cpu_to_le16(skb->len);
425 	skb_copy_from_linear_data(skb, tx_buf + 2, skb->len);
426 	catc->tx_ptr += skb->len + 2;
427 
428 	if (!test_and_set_bit(TX_RUNNING, &catc->flags)) {
429 		r = catc_tx_run(catc);
430 		if (r < 0)
431 			clear_bit(TX_RUNNING, &catc->flags);
432 	}
433 
434 	if ((catc->is_f5u011 && catc->tx_ptr) ||
435 	    (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2))))
436 		netif_stop_queue(netdev);
437 
438 	spin_unlock_irqrestore(&catc->tx_lock, flags);
439 
440 	if (r >= 0) {
441 		catc->netdev->stats.tx_bytes += skb->len;
442 		catc->netdev->stats.tx_packets++;
443 	}
444 
445 	dev_kfree_skb(skb);
446 
447 	return NETDEV_TX_OK;
448 }
449 
450 static void catc_tx_timeout(struct net_device *netdev, unsigned int txqueue)
451 {
452 	struct catc *catc = netdev_priv(netdev);
453 
454 	dev_warn(&netdev->dev, "Transmit timed out.\n");
455 	usb_unlink_urb(catc->tx_urb);
456 }
457 
458 /*
459  * Control messages.
460  */
461 
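/*
 * Synchronous requests go through usb_control_msg().  Contexts that must
 * not sleep (URB completions, the stats timer, ndo_set_rx_mode) instead
 * queue their requests in the ctrl_queue ring, which is serviced one
 * control URB at a time by catc_ctrl_run()/catc_ctrl_done().
 */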
462 static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len)
463 {
464 	int retval = usb_control_msg(catc->usbdev,
465 		dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0),
466 		request, 0x40 | dir, value, index, buf, len, 1000);
467 	return retval < 0 ? retval : 0;
468 }
469 
470 static void catc_ctrl_run(struct catc *catc)
471 {
472 	struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
473 	struct usb_device *usbdev = catc->usbdev;
474 	struct urb *urb = catc->ctrl_urb;
475 	struct usb_ctrlrequest *dr = &catc->ctrl_dr;
476 	int status;
477 
478 	dr->bRequest = q->request;
479 	dr->bRequestType = 0x40 | q->dir;
480 	dr->wValue = cpu_to_le16(q->value);
481 	dr->wIndex = cpu_to_le16(q->index);
482 	dr->wLength = cpu_to_le16(q->len);
483 
484 	urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0);
485 	urb->transfer_buffer_length = q->len;
486 	urb->transfer_buffer = catc->ctrl_buf;
487 	urb->setup_packet = (void *) dr;
488 	urb->dev = usbdev;
489 
490 	if (!q->dir && q->buf && q->len)
491 		memcpy(catc->ctrl_buf, q->buf, q->len);
492 
493 	if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
494 		dev_err(&catc->usbdev->dev, "submit(ctrl_urb) status %d\n",
495 			status);
496 }
497 
498 static void catc_ctrl_done(struct urb *urb)
499 {
500 	struct catc *catc = urb->context;
501 	struct ctrl_queue *q;
502 	unsigned long flags;
503 	int status = urb->status;
504 
505 	if (status)
506 		dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n",
507 			status, urb->actual_length);
508 
509 	spin_lock_irqsave(&catc->ctrl_lock, flags);
510 
511 	q = catc->ctrl_queue + catc->ctrl_tail;
512 
513 	if (q->dir) {
514 		if (q->buf && q->len)
515 			memcpy(q->buf, catc->ctrl_buf, q->len);
516 		else
517 			q->buf = catc->ctrl_buf;
518 	}
519 
520 	if (q->callback)
521 		q->callback(catc, q);
522 
523 	catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);
524 
525 	if (catc->ctrl_head != catc->ctrl_tail)
526 		catc_ctrl_run(catc);
527 	else
528 		clear_bit(CTRL_RUNNING, &catc->flags);
529 
530 	spin_unlock_irqrestore(&catc->ctrl_lock, flags);
531 }
532 
533 static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value,
534 	u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q))
535 {
536 	struct ctrl_queue *q;
537 	int retval = 0;
538 	unsigned long flags;
539 
540 	spin_lock_irqsave(&catc->ctrl_lock, flags);
541 
542 	q = catc->ctrl_queue + catc->ctrl_head;
543 
544 	q->dir = dir;
545 	q->request = request;
546 	q->value = value;
547 	q->index = index;
548 	q->buf = buf;
549 	q->len = len;
550 	q->callback = callback;
551 
552 	catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1);
553 
554 	if (catc->ctrl_head == catc->ctrl_tail) {
555 		dev_err(&catc->usbdev->dev, "ctrl queue full\n");
556 		catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);
557 		retval = -1;
558 	}
559 
560 	if (!test_and_set_bit(CTRL_RUNNING, &catc->flags))
561 		catc_ctrl_run(catc);
562 
563 	spin_unlock_irqrestore(&catc->ctrl_lock, flags);
564 
565 	return retval;
566 }
567 
568 /*
569  * Statistics.
570  */
571 
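/*
 * The hardware statistics counters are read one byte at a time.  Because
 * catc_stats_timer() polls the registers in descending order, both halves
 * of a 16-bit counter are available once the even-offset (high) byte
 * arrives; the delta against the previous poll is then added to the
 * netdev statistics.
 */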
572 static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
573 {
574 	int index = q->index - EthStats;
575 	u16 data, last;
576 
577 	catc->stats_buf[index] = *((char *)q->buf);
578 
579 	if (index & 1)
580 		return;
581 
582 	data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1];
583 	last = catc->stats_vals[index >> 1];
584 
585 	switch (index) {
586 		case TxSingleColl:
587 		case TxMultiColl:
588 			catc->netdev->stats.collisions += data - last;
589 			break;
590 		case TxExcessColl:
591 			catc->netdev->stats.tx_aborted_errors += data - last;
592 			catc->netdev->stats.tx_errors += data - last;
593 			break;
594 		case RxFramErr:
595 			catc->netdev->stats.rx_frame_errors += data - last;
596 			catc->netdev->stats.rx_errors += data - last;
597 			break;
598 	}
599 
600 	catc->stats_vals[index >> 1] = data;
601 }
602 
603 static void catc_stats_timer(struct timer_list *t)
604 {
605 	struct catc *catc = from_timer(catc, t, timer);
606 	int i;
607 
608 	for (i = 0; i < 8; i++)
609 		catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done);
610 
611 	mod_timer(&catc->timer, jiffies + STATS_UPDATE);
612 }
613 
614 /*
615  * Receive modes. Broadcast, Multicast, Promisc.
616  */
617 
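/*
 * catc_multicast() sets one bit in the 512-bit hash table: the low nine
 * bits of the little-endian CRC-32 of the address select a bit in the
 * 64-byte table.
 */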
618 static void catc_multicast(const unsigned char *addr, u8 *multicast)
619 {
620 	u32 crc;
621 
622 	crc = ether_crc_le(6, addr);
623 	multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
624 }
625 
626 static void catc_set_multicast_list(struct net_device *netdev)
627 {
628 	struct catc *catc = netdev_priv(netdev);
629 	struct netdev_hw_addr *ha;
630 	u8 broadcast[ETH_ALEN];
631 	u8 rx = RxEnable | RxPolarity | RxMultiCast;
632 
633 	eth_broadcast_addr(broadcast);
634 	memset(catc->multicast, 0, 64);
635 
636 	catc_multicast(broadcast, catc->multicast);
637 	catc_multicast(netdev->dev_addr, catc->multicast);
638 
639 	if (netdev->flags & IFF_PROMISC) {
640 		memset(catc->multicast, 0xff, 64);
641 		rx |= (!catc->is_f5u011) ? RxPromisc : AltRxPromisc;
642 	}
643 
644 	if (netdev->flags & IFF_ALLMULTI) {
645 		memset(catc->multicast, 0xff, 64);
646 	} else {
647 		netdev_for_each_mc_addr(ha, netdev) {
648 			u32 crc = ether_crc_le(6, ha->addr);
649 			if (!catc->is_f5u011) {
650 				catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
651 			} else {
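				/* The F5U011 hash is indexed by the top six CRC bits. */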
652 				catc->multicast[7-(crc >> 29)] |= 1 << ((crc >> 26) & 7);
653 			}
654 		}
655 	}
656 	if (!catc->is_f5u011) {
657 		catc_set_reg_async(catc, RxUnit, rx);
658 		catc_write_mem_async(catc, 0xfa80, catc->multicast, 64);
659 	} else {
660 		f5u011_mchash_async(catc, catc->multicast);
661 		if (catc->rxmode[0] != rx) {
662 			catc->rxmode[0] = rx;
663 			netdev_dbg(catc->netdev,
664 				   "Setting RX mode to %2.2X %2.2X\n",
665 				   catc->rxmode[0], catc->rxmode[1]);
666 			f5u011_rxmode_async(catc, catc->rxmode);
667 		}
668 	}
669 }
670 
671 static void catc_get_drvinfo(struct net_device *dev,
672 			     struct ethtool_drvinfo *info)
673 {
674 	struct catc *catc = netdev_priv(dev);
675 	strlcpy(info->driver, driver_name, sizeof(info->driver));
676 	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
677 	usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
678 }
679 
680 static int catc_get_link_ksettings(struct net_device *dev,
681 				   struct ethtool_link_ksettings *cmd)
682 {
683 	struct catc *catc = netdev_priv(dev);
684 	if (!catc->is_f5u011)
685 		return -EOPNOTSUPP;
686 
687 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
688 	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
689 	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
690 
691 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
692 	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
693 	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
694 
695 	cmd->base.speed = SPEED_10;
696 	cmd->base.duplex = DUPLEX_HALF;
697 	cmd->base.port = PORT_TP;
698 	cmd->base.phy_address = 0;
699 	cmd->base.autoneg = AUTONEG_DISABLE;
700 
701 	return 0;
702 }
703 
704 static const struct ethtool_ops ops = {
705 	.get_drvinfo = catc_get_drvinfo,
706 	.get_link = ethtool_op_get_link,
707 	.get_link_ksettings = catc_get_link_ksettings,
708 };
709 
710 /*
711  * Open, close.
712  */
713 
714 static int catc_open(struct net_device *netdev)
715 {
716 	struct catc *catc = netdev_priv(netdev);
717 	int status;
718 
719 	catc->irq_urb->dev = catc->usbdev;
720 	if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) {
721 		dev_err(&catc->usbdev->dev, "submit(irq_urb) status %d\n",
722 			status);
723 		return status;
724 	}
725 
726 	netif_start_queue(netdev);
727 
728 	if (!catc->is_f5u011)
729 		mod_timer(&catc->timer, jiffies + STATS_UPDATE);
730 
731 	return 0;
732 }
733 
734 static int catc_stop(struct net_device *netdev)
735 {
736 	struct catc *catc = netdev_priv(netdev);
737 
738 	netif_stop_queue(netdev);
739 
740 	if (!catc->is_f5u011)
741 		del_timer_sync(&catc->timer);
742 
743 	usb_kill_urb(catc->rx_urb);
744 	usb_kill_urb(catc->tx_urb);
745 	usb_kill_urb(catc->irq_urb);
746 	usb_kill_urb(catc->ctrl_urb);
747 
748 	return 0;
749 }
750 
751 static const struct net_device_ops catc_netdev_ops = {
752 	.ndo_open		= catc_open,
753 	.ndo_stop		= catc_stop,
754 	.ndo_start_xmit		= catc_start_xmit,
755 
756 	.ndo_tx_timeout		= catc_tx_timeout,
757 	.ndo_set_rx_mode	= catc_set_multicast_list,
758 	.ndo_set_mac_address 	= eth_mac_addr,
759 	.ndo_validate_addr	= eth_validate_addr,
760 };
761 
762 /*
763  * USB probe, disconnect.
764  */
765 
766 static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
767 {
768 	struct device *dev = &intf->dev;
769 	struct usb_device *usbdev = interface_to_usbdev(intf);
770 	struct net_device *netdev;
771 	struct catc *catc;
772 	u8 broadcast[ETH_ALEN];
773 	u8 *macbuf;
774 	int pktsz, ret = -ENOMEM;
775 
776 	macbuf = kmalloc(ETH_ALEN, GFP_KERNEL);
777 	if (!macbuf)
778 		goto error;
779 
780 	if (usb_set_interface(usbdev,
781 			intf->altsetting->desc.bInterfaceNumber, 1)) {
782 		dev_err(dev, "Can't set altsetting 1.\n");
783 		ret = -EIO;
784 		goto fail_mem;
785 	}
786 
787 	netdev = alloc_etherdev(sizeof(struct catc));
788 	if (!netdev)
789 		goto fail_mem;
790 
791 	catc = netdev_priv(netdev);
792 
793 	netdev->netdev_ops = &catc_netdev_ops;
794 	netdev->watchdog_timeo = TX_TIMEOUT;
795 	netdev->ethtool_ops = &ops;
796 
797 	catc->usbdev = usbdev;
798 	catc->netdev = netdev;
799 
800 	spin_lock_init(&catc->tx_lock);
801 	spin_lock_init(&catc->ctrl_lock);
802 
803 	timer_setup(&catc->timer, catc_stats_timer, 0);
804 
805 	catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
806 	catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
807 	catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
808 	catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
809 	if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
810 	    (!catc->rx_urb) || (!catc->irq_urb)) {
811 		dev_err(&intf->dev, "No free urbs available.\n");
812 		ret = -ENOMEM;
813 		goto fail_free;
814 	}
815 
816 	/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
817 	if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
818 	    le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
819 	    le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
820 		dev_dbg(dev, "Testing for f5u011\n");
821 		catc->is_f5u011 = 1;
822 		atomic_set(&catc->recq_sz, 0);
823 		pktsz = RX_PKT_SZ;
824 	} else {
825 		pktsz = RX_MAX_BURST * (PKT_SZ + 2);
826 	}
827 
828 	usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
829 		NULL, NULL, 0, catc_ctrl_done, catc);
830 
831 	usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1),
832 		NULL, 0, catc_tx_done, catc);
833 
834 	usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1),
835 		catc->rx_buf, pktsz, catc_rx_done, catc);
836 
837 	usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2),
838 		catc->irq_buf, 2, catc_irq_done, catc, 1);
839 
840 	if (!catc->is_f5u011) {
841 		u32 *buf;
842 		int i;
843 
844 		dev_dbg(dev, "Checking memory size\n");
845 
846 		buf = kmalloc(4, GFP_KERNEL);
847 		if (!buf) {
848 			ret = -ENOMEM;
849 			goto fail_free;
850 		}
851 
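		/*
		 * Write distinct patterns to 0x7a80 and 0xfa80 and read the
		 * first location back: if the second write clobbered it, the
		 * two addresses alias and only 32k of memory is present.
		 */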
852 		*buf = 0x12345678;
853 		catc_write_mem(catc, 0x7a80, buf, 4);
854 		*buf = 0x87654321;
855 		catc_write_mem(catc, 0xfa80, buf, 4);
856 		catc_read_mem(catc, 0x7a80, buf, 4);
857 
858 		switch (*buf) {
859 		case 0x12345678:
860 			catc_set_reg(catc, TxBufCount, 8);
861 			catc_set_reg(catc, RxBufCount, 32);
862 			dev_dbg(dev, "64k Memory\n");
863 			break;
864 		default:
865 			dev_warn(&intf->dev,
866 				 "Couldn't detect memory size, assuming 32k\n");
867 			fallthrough;
868 		case 0x87654321:
869 			catc_set_reg(catc, TxBufCount, 4);
870 			catc_set_reg(catc, RxBufCount, 16);
871 			dev_dbg(dev, "32k Memory\n");
872 			break;
873 		}
874 
875 		kfree(buf);
876 
877 		dev_dbg(dev, "Getting MAC from SEEROM.\n");
878 
879 		catc_get_mac(catc, macbuf);
880 		eth_hw_addr_set(netdev, macbuf);
881 
882 		dev_dbg(dev, "Setting MAC into registers.\n");
883 
884 		for (i = 0; i < 6; i++)
885 			catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
886 
887 		dev_dbg(dev, "Filling the multicast list.\n");
888 
889 		eth_broadcast_addr(broadcast);
890 		catc_multicast(broadcast, catc->multicast);
891 		catc_multicast(netdev->dev_addr, catc->multicast);
892 		catc_write_mem(catc, 0xfa80, catc->multicast, 64);
893 
894 		dev_dbg(dev, "Clearing error counters.\n");
895 
896 		for (i = 0; i < 8; i++)
897 			catc_set_reg(catc, EthStats + i, 0);
898 		catc->last_stats = jiffies;
899 
900 		dev_dbg(dev, "Enabling.\n");
901 
902 		catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
903 		catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
904 		catc_set_reg(catc, LEDCtrl, LEDLink);
905 		catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
906 	} else {
907 		dev_dbg(dev, "Performing reset\n");
908 		catc_reset(catc);
909 		catc_get_mac(catc, macbuf);
910 		eth_hw_addr_set(netdev, macbuf);
911 
912 		dev_dbg(dev, "Setting RX Mode\n");
913 		catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
914 		catc->rxmode[1] = 0;
915 		f5u011_rxmode(catc, catc->rxmode);
916 	}
917 	dev_dbg(dev, "Init done.\n");
918 	printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
919 	       netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
920 	       usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
921 	usb_set_intfdata(intf, catc);
922 
923 	SET_NETDEV_DEV(netdev, &intf->dev);
924 	ret = register_netdev(netdev);
925 	if (ret)
926 		goto fail_clear_intfdata;
927 
928 	kfree(macbuf);
929 	return 0;
930 
931 fail_clear_intfdata:
932 	usb_set_intfdata(intf, NULL);
933 fail_free:
934 	usb_free_urb(catc->ctrl_urb);
935 	usb_free_urb(catc->tx_urb);
936 	usb_free_urb(catc->rx_urb);
937 	usb_free_urb(catc->irq_urb);
938 	free_netdev(netdev);
939 fail_mem:
940 	kfree(macbuf);
941 error:
942 	return ret;
943 }
944 
945 static void catc_disconnect(struct usb_interface *intf)
946 {
947 	struct catc *catc = usb_get_intfdata(intf);
948 
949 	usb_set_intfdata(intf, NULL);
950 	if (catc) {
951 		unregister_netdev(catc->netdev);
952 		usb_free_urb(catc->ctrl_urb);
953 		usb_free_urb(catc->tx_urb);
954 		usb_free_urb(catc->rx_urb);
955 		usb_free_urb(catc->irq_urb);
956 		free_netdev(catc->netdev);
957 	}
958 }
959 
960 /*
961  * Module functions and tables.
962  */
963 
964 static const struct usb_device_id catc_id_table[] = {
965 	{ USB_DEVICE(0x0423, 0xa) },	/* CATC Netmate, Belkin F5U011 */
966 	{ USB_DEVICE(0x0423, 0xc) },	/* CATC Netmate II, Belkin F5U111 */
967 	{ USB_DEVICE(0x08d1, 0x1) },	/* smartBridges smartNIC */
968 	{ }
969 };
970 
971 MODULE_DEVICE_TABLE(usb, catc_id_table);
972 
973 static struct usb_driver catc_driver = {
974 	.name =		driver_name,
975 	.probe =	catc_probe,
976 	.disconnect =	catc_disconnect,
977 	.id_table =	catc_id_table,
978 	.disable_hub_initiated_lpm = 1,
979 };
980 
981 module_usb_driver(catc_driver);
982