// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2001 Vojtech Pavlik
 *
 * CATC EL1210A NetMate USB Ethernet driver
 *
 * Sponsored by SuSE
 *
 * Based on the work of
 *	Donald Becker
 *
 * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002
 *  - adds support for Belkin F5U011
 */

/*
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>

#undef DEBUG

#include <linux/usb.h>

#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>"
#define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver"

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

static const char driver_name[] = "catc";

/*
 * Some defines.
 */

#define STATS_UPDATE	(HZ)	/* Time between stats updates */
#define TX_TIMEOUT	(5*HZ)	/* Max time the queue can be stopped */
#define PKT_SZ		1536	/* Max Ethernet packet size */
#define RX_MAX_BURST	15	/* Max packets per rx buffer (> 0, < 16) */
#define TX_MAX_BURST	15	/* Max full sized packets per tx buffer (> 0) */
#define CTRL_QUEUE	16	/* Max control requests in flight (power of two) */
#define RX_PKT_SZ	1600	/* Max size of receive packet for F5U011 */

/*
 * Control requests.
 */

enum control_requests {
	ReadMem =	0xf1,
	GetMac =	0xf2,
	Reset =		0xf4,
	SetMac =	0xf5,
	SetRxMode =	0xf5,	/* F5U011 only */
	WriteROM =	0xf8,
	SetReg =	0xfa,
	GetReg =	0xfb,
	WriteMem =	0xfc,
	ReadROM =	0xfd,
};

/*
 * Registers.
 */

enum register_offsets {
	TxBufCount =	0x20,
	RxBufCount =	0x21,
	OpModes =	0x22,
	TxQed =		0x23,
	RxQed =		0x24,
	MaxBurst =	0x25,
	RxUnit =	0x60,
	EthStatus =	0x61,
	StationAddr0 =	0x67,
	EthStats =	0x69,
	LEDCtrl =	0x81,
};

enum eth_stats {
	TxSingleColl =	0x00,
	TxMultiColl =	0x02,
	TxExcessColl =	0x04,
	RxFramErr =	0x06,
};

enum op_mode_bits {
	Op3MemWaits =	0x03,
	OpLenInclude =	0x08,
	OpRxMerge =	0x10,
	OpTxMerge =	0x20,
	OpWin95bugfix =	0x40,
	OpLoopback =	0x80,
};

enum rx_filter_bits {
	RxEnable =	0x01,
	RxPolarity =	0x02,
	RxForceOK =	0x04,
	RxMultiCast =	0x08,
	RxPromisc =	0x10,
	AltRxPromisc =	0x20,	/* F5U011 uses different bit */
};

enum led_values {
	LEDFast =	0x01,
	LEDSlow =	0x02,
	LEDFlash =	0x03,
	LEDPulse =	0x04,
	LEDLink =	0x08,
};

enum link_status {
	LinkNoChange = 0,
	LinkGood = 1,
	LinkBad = 2
};

/*
 * The catc struct.
 */
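
/*
 * Bits in catc->flags: each one is set while the corresponding URB
 * (control chain, bulk rx, bulk tx) is in flight, so that at most one
 * of each kind is submitted at a time.
 */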

#define CTRL_RUNNING	0
#define RX_RUNNING	1
#define TX_RUNNING	2

struct catc {
	struct net_device *netdev;
	struct usb_device *usbdev;

	unsigned long flags;

	unsigned int tx_ptr, tx_idx;
	unsigned int ctrl_head, ctrl_tail;
	spinlock_t tx_lock, ctrl_lock;

	u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)];
	u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)];
	u8 irq_buf[2];
	u8 ctrl_buf[64];
	struct usb_ctrlrequest ctrl_dr;

	struct timer_list timer;
	u8 stats_buf[8];
	u16 stats_vals[4];
	unsigned long last_stats;

	u8 multicast[64];

	struct ctrl_queue {
		u8 dir;
		u8 request;
		u16 value;
		u16 index;
		void *buf;
		int len;
		void (*callback)(struct catc *catc, struct ctrl_queue *q);
	} ctrl_queue[CTRL_QUEUE];

	struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb;

	u8 is_f5u011;	/* Set if device is an F5U011 */
	u8 rxmode[2];	/* Used for F5U011 */
	atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */
};

/*
 * Useful macros.
 */

#define catc_get_mac(catc, mac)		catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6)
#define catc_reset(catc)		catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0)
#define catc_set_reg(catc, reg, val)	catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0)
#define catc_get_reg(catc, reg, buf)	catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1)
#define catc_write_mem(catc, addr, buf, size)	catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size)
#define catc_read_mem(catc, addr, buf, size)	catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size)

#define f5u011_rxmode(catc, rxmode)		catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2)
#define f5u011_rxmode_async(catc, rxmode)	catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL)
#define f5u011_mchash_async(catc, hash)		catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL)

#define catc_set_reg_async(catc, reg, val)	catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL)
#define catc_get_reg_async(catc, reg, cb)	catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 1, cb)
#define catc_write_mem_async(catc, addr, buf, size)	catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL)

/*
 * Receive routines.
 */
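
/*
 * Bulk-in framing: on the EL1210A a single transfer can carry several
 * packets, each prefixed with a little-endian 16-bit length and padded
 * so that the next one starts on a 64-byte boundary.  The F5U011
 * instead delivers exactly one unprefixed packet per bulk transfer.
 */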

static void catc_rx_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	u8 *pkt_start = urb->transfer_buffer;
	struct sk_buff *skb;
	int pkt_len, pkt_offset = 0;
	int status = urb->status;

	if (!catc->is_f5u011) {
		clear_bit(RX_RUNNING, &catc->flags);
		pkt_offset = 2;
	}

	if (status) {
		dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n",
			status, urb->actual_length);
		return;
	}

	do {
		if (!catc->is_f5u011) {
			pkt_len = le16_to_cpup((__le16 *)pkt_start);
			if (pkt_len > urb->actual_length) {
				catc->netdev->stats.rx_length_errors++;
				catc->netdev->stats.rx_errors++;
				break;
			}
		} else {
			pkt_len = urb->actual_length;
		}

		if (!(skb = dev_alloc_skb(pkt_len)))
			return;

		skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
		skb_put(skb, pkt_len);

		skb->protocol = eth_type_trans(skb, catc->netdev);
		netif_rx(skb);

		catc->netdev->stats.rx_packets++;
		catc->netdev->stats.rx_bytes += pkt_len;

		/* F5U011 only does one packet per RX */
		if (catc->is_f5u011)
			break;
		pkt_start += (((pkt_len + 1) >> 6) + 1) << 6;

	} while (pkt_start - (u8 *)urb->transfer_buffer < urb->actual_length);

	if (catc->is_f5u011) {
		if (atomic_read(&catc->recq_sz)) {
			int state;
			atomic_dec(&catc->recq_sz);
			netdev_dbg(catc->netdev, "getting extra packet\n");
			urb->dev = catc->usbdev;
			if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
				netdev_dbg(catc->netdev,
					   "submit(rx_urb) status %d\n", state);
			}
		} else {
			clear_bit(RX_RUNNING, &catc->flags);
		}
	}
}

static void catc_irq_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	u8 *data = urb->transfer_buffer;
	int status = urb->status;
	unsigned int hasdata, linksts = LinkNoChange;
	int res;

	if (!catc->is_f5u011) {
		hasdata = data[1] & 0x80;
		if (data[1] & 0x40)
			linksts = LinkGood;
		else if (data[1] & 0x20)
			linksts = LinkBad;
	} else {
		hasdata = (unsigned int)(be16_to_cpup((__be16 *)data) & 0x0fff);
		if (data[0] == 0x90)
			linksts = LinkGood;
		else if (data[0] == 0xA0)
			linksts = LinkBad;
	}

	switch (status) {
	case 0:			/* success */
		break;
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	/* -EPIPE: should clear the halt */
	default:		/* error */
		dev_dbg(&urb->dev->dev,
			"irq_done, status %d, data %02x %02x.\n",
			status, data[0], data[1]);
		goto resubmit;
	}

	if (linksts == LinkGood) {
		netif_carrier_on(catc->netdev);
		netdev_dbg(catc->netdev, "link ok\n");
	}

	if (linksts == LinkBad) {
		netif_carrier_off(catc->netdev);
		netdev_dbg(catc->netdev, "link bad\n");
	}

	if (hasdata) {
		if (test_and_set_bit(RX_RUNNING, &catc->flags)) {
			if (catc->is_f5u011)
				atomic_inc(&catc->recq_sz);
		} else {
			catc->rx_urb->dev = catc->usbdev;
			if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) {
				dev_err(&catc->usbdev->dev,
					"submit(rx_urb) status %d\n", res);
			}
		}
	}
resubmit:
	res = usb_submit_urb(urb, GFP_ATOMIC);
	if (res)
		dev_err(&catc->usbdev->dev,
			"can't resubmit intr, %s-%s, status %d\n",
			catc->usbdev->bus->bus_name,
			catc->usbdev->devpath, res);
}

/*
 * Transmit routines.
 */
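
/*
 * Packets are staged in one of two tx buffers, each preceded by a
 * 16-bit length (little-endian on the EL1210A, big-endian on the
 * F5U011) and aligned to a 64-byte boundary.  While one buffer is on
 * the wire (TX_RUNNING set), new packets accumulate in the other;
 * catc_tx_run() swaps the buffers when it submits the URB.
 */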

static int catc_tx_run(struct catc *catc)
{
	int status;

	if (catc->is_f5u011)
		catc->tx_ptr = (catc->tx_ptr + 63) & ~63;

	catc->tx_urb->transfer_buffer_length = catc->tx_ptr;
	catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx];
	catc->tx_urb->dev = catc->usbdev;

	if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0)
		dev_err(&catc->usbdev->dev, "submit(tx_urb), status %d\n",
			status);

	catc->tx_idx = !catc->tx_idx;
	catc->tx_ptr = 0;

	netif_trans_update(catc->netdev);
	return status;
}

static void catc_tx_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	unsigned long flags;
	int r, status = urb->status;

	if (status == -ECONNRESET) {
		dev_dbg(&urb->dev->dev, "Tx Reset.\n");
		urb->status = 0;
		netif_trans_update(catc->netdev);
		catc->netdev->stats.tx_errors++;
		clear_bit(TX_RUNNING, &catc->flags);
		netif_wake_queue(catc->netdev);
		return;
	}

	if (status) {
		dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n",
			status, urb->actual_length);
		return;
	}

	spin_lock_irqsave(&catc->tx_lock, flags);

	if (catc->tx_ptr) {
		r = catc_tx_run(catc);
		if (unlikely(r < 0))
			clear_bit(TX_RUNNING, &catc->flags);
	} else {
		clear_bit(TX_RUNNING, &catc->flags);
	}

	netif_wake_queue(catc->netdev);

	spin_unlock_irqrestore(&catc->tx_lock, flags);
}

static netdev_tx_t catc_start_xmit(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);
	unsigned long flags;
	int r = 0;
	char *tx_buf;

	spin_lock_irqsave(&catc->tx_lock, flags);

	catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6;
	tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr;
	if (catc->is_f5u011)
		*(__be16 *)tx_buf = cpu_to_be16(skb->len);
	else
		*(__le16 *)tx_buf = cpu_to_le16(skb->len);
	skb_copy_from_linear_data(skb, tx_buf + 2, skb->len);
	catc->tx_ptr += skb->len + 2;

	if (!test_and_set_bit(TX_RUNNING, &catc->flags)) {
		r = catc_tx_run(catc);
		if (r < 0)
			clear_bit(TX_RUNNING, &catc->flags);
	}

	if ((catc->is_f5u011 && catc->tx_ptr) ||
	    (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2))))
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&catc->tx_lock, flags);

	if (r >= 0) {
		catc->netdev->stats.tx_bytes += skb->len;
		catc->netdev->stats.tx_packets++;
	}

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void catc_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct catc *catc = netdev_priv(netdev);

	dev_warn(&netdev->dev, "Transmit timed out.\n");
	usb_unlink_urb(catc->tx_urb);
}

/*
 * Control messages.
 */

static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len)
{
	int retval = usb_control_msg(catc->usbdev,
		dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0),
		request, 0x40 | dir, value, index, buf, len, 1000);
	return retval < 0 ? retval : 0;
}
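
/*
 * Control requests issued from completion handlers, where
 * usb_control_msg() cannot be used, go through a small ring of
 * ctrl_queue entries.  catc_ctrl_run() submits the entry at the tail;
 * catc_ctrl_done() copies back any read data, invokes the optional
 * callback and advances to the next entry, clearing CTRL_RUNNING once
 * the ring is empty.
 */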

static void catc_ctrl_run(struct catc *catc)
{
	struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
	struct usb_device *usbdev = catc->usbdev;
	struct urb *urb = catc->ctrl_urb;
	struct usb_ctrlrequest *dr = &catc->ctrl_dr;
	int status;

	dr->bRequest = q->request;
	dr->bRequestType = 0x40 | q->dir;
	dr->wValue = cpu_to_le16(q->value);
	dr->wIndex = cpu_to_le16(q->index);
	dr->wLength = cpu_to_le16(q->len);

	urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0);
	urb->transfer_buffer_length = q->len;
	urb->transfer_buffer = catc->ctrl_buf;
	urb->setup_packet = (void *)dr;
	urb->dev = usbdev;

	if (!q->dir && q->buf && q->len)
		memcpy(catc->ctrl_buf, q->buf, q->len);

	if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
		dev_err(&catc->usbdev->dev, "submit(ctrl_urb) status %d\n",
			status);
}

static void catc_ctrl_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	struct ctrl_queue *q;
	unsigned long flags;
	int status = urb->status;

	if (status)
		dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n",
			status, urb->actual_length);

	spin_lock_irqsave(&catc->ctrl_lock, flags);

	q = catc->ctrl_queue + catc->ctrl_tail;

	if (q->dir) {
		if (q->buf && q->len)
			memcpy(q->buf, catc->ctrl_buf, q->len);
		else
			q->buf = catc->ctrl_buf;
	}

	if (q->callback)
		q->callback(catc, q);

	catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);

	if (catc->ctrl_head != catc->ctrl_tail)
		catc_ctrl_run(catc);
	else
		clear_bit(CTRL_RUNNING, &catc->flags);

	spin_unlock_irqrestore(&catc->ctrl_lock, flags);
}

static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value,
	u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q))
{
	struct ctrl_queue *q;
	int retval = 0;
	unsigned long flags;

	spin_lock_irqsave(&catc->ctrl_lock, flags);

	q = catc->ctrl_queue + catc->ctrl_head;

	q->dir = dir;
	q->request = request;
	q->value = value;
	q->index = index;
	q->buf = buf;
	q->len = len;
	q->callback = callback;

	catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1);

	if (catc->ctrl_head == catc->ctrl_tail) {
		dev_err(&catc->usbdev->dev, "ctrl queue full\n");
		catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);
		retval = -1;
	}

	if (!test_and_set_bit(CTRL_RUNNING, &catc->flags))
		catc_ctrl_run(catc);

	spin_unlock_irqrestore(&catc->ctrl_lock, flags);

	return retval;
}

/*
 * Statistics.
 */
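
/*
 * The error counters are 16 bits wide, stored big-endian across the
 * eight EthStats registers.  catc_stats_timer() queues reads once a
 * second, highest address first, so that when the even (high) byte of
 * a counter reaches catc_stats_done() its low byte is already cached
 * and the delta since the previous poll can be added to the netdev
 * statistics.
 */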

static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
{
	int index = q->index - EthStats;
	u16 data, last;

	catc->stats_buf[index] = *((char *)q->buf);

	if (index & 1)
		return;

	data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1];
	last = catc->stats_vals[index >> 1];

	switch (index) {
	case TxSingleColl:
	case TxMultiColl:
		catc->netdev->stats.collisions += data - last;
		break;
	case TxExcessColl:
		catc->netdev->stats.tx_aborted_errors += data - last;
		catc->netdev->stats.tx_errors += data - last;
		break;
	case RxFramErr:
		catc->netdev->stats.rx_frame_errors += data - last;
		catc->netdev->stats.rx_errors += data - last;
		break;
	}

	catc->stats_vals[index >> 1] = data;
}

static void catc_stats_timer(struct timer_list *t)
{
	struct catc *catc = timer_container_of(catc, t, timer);
	int i;

	for (i = 0; i < 8; i++)
		catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done);

	mod_timer(&catc->timer, jiffies + STATS_UPDATE);
}

/*
 * Receive modes. Broadcast, Multicast, Promisc.
 */

static void catc_multicast(const unsigned char *addr, u8 *multicast)
{
	u32 crc;

	crc = ether_crc_le(6, addr);
	multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
}

static void catc_set_multicast_list(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	u8 broadcast[ETH_ALEN];
	u8 rx = RxEnable | RxPolarity | RxMultiCast;

	eth_broadcast_addr(broadcast);
	memset(catc->multicast, 0, 64);

	catc_multicast(broadcast, catc->multicast);
	catc_multicast(netdev->dev_addr, catc->multicast);

	if (netdev->flags & IFF_PROMISC) {
		memset(catc->multicast, 0xff, 64);
		rx |= (!catc->is_f5u011) ? RxPromisc : AltRxPromisc;
	}

	if (netdev->flags & IFF_ALLMULTI) {
		memset(catc->multicast, 0xff, 64);
	} else {
		netdev_for_each_mc_addr(ha, netdev) {
			u32 crc = ether_crc_le(6, ha->addr);
			if (!catc->is_f5u011) {
				catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
			} else {
				catc->multicast[7 - (crc >> 29)] |= 1 << ((crc >> 26) & 7);
			}
		}
	}
	if (!catc->is_f5u011) {
		catc_set_reg_async(catc, RxUnit, rx);
		catc_write_mem_async(catc, 0xfa80, catc->multicast, 64);
	} else {
		f5u011_mchash_async(catc, catc->multicast);
		if (catc->rxmode[0] != rx) {
			catc->rxmode[0] = rx;
			netdev_dbg(catc->netdev,
				   "Setting RX mode to %2.2X %2.2X\n",
				   catc->rxmode[0], catc->rxmode[1]);
			f5u011_rxmode_async(catc, catc->rxmode);
		}
	}
}

static void catc_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct catc *catc = netdev_priv(dev);
	strscpy(info->driver, driver_name, sizeof(info->driver));
	usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}

static int catc_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct catc *catc = netdev_priv(dev);
	if (!catc->is_f5u011)
		return -EOPNOTSUPP;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);

	cmd->base.speed = SPEED_10;
	cmd->base.duplex = DUPLEX_HALF;
	cmd->base.port = PORT_TP;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_DISABLE;

	return 0;
}

static const struct ethtool_ops ops = {
	.get_drvinfo = catc_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = catc_get_link_ksettings,
};

/*
 * Open, close.
 */

static int catc_open(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);
	int status;

	catc->irq_urb->dev = catc->usbdev;
	if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) {
		dev_err(&catc->usbdev->dev, "submit(irq_urb) status %d\n",
			status);
		return -1;
	}

	netif_start_queue(netdev);

	if (!catc->is_f5u011)
		mod_timer(&catc->timer, jiffies + STATS_UPDATE);

	return 0;
}

static int catc_stop(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);

	netif_stop_queue(netdev);

	if (!catc->is_f5u011)
		timer_delete_sync(&catc->timer);

	usb_kill_urb(catc->rx_urb);
	usb_kill_urb(catc->tx_urb);
	usb_kill_urb(catc->irq_urb);
	usb_kill_urb(catc->ctrl_urb);

	return 0;
}

static const struct net_device_ops catc_netdev_ops = {
	.ndo_open		= catc_open,
	.ndo_stop		= catc_stop,
	.ndo_start_xmit		= catc_start_xmit,

	.ndo_tx_timeout		= catc_tx_timeout,
	.ndo_set_rx_mode	= catc_set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * USB probe, disconnect.
 */
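
/*
 * The Belkin F5U011 shares the NetMate's vendor and product IDs and is
 * told apart by bcdDevice (0x0130).  On the EL1210A, the probe sizes
 * the on-chip packet memory by writing different test patterns to
 * 0x7a80 and 0xfa80 and checking whether the two addresses alias
 * (32k) or not (64k).
 */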

static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct device *dev = &intf->dev;
	struct usb_device *usbdev = interface_to_usbdev(intf);
	struct net_device *netdev;
	struct catc *catc;
	u8 broadcast[ETH_ALEN];
	u8 *macbuf;
	int pktsz, ret = -ENOMEM;

	macbuf = kmalloc(ETH_ALEN, GFP_KERNEL);
	if (!macbuf)
		goto error;

	if (usb_set_interface(usbdev,
			intf->altsetting->desc.bInterfaceNumber, 1)) {
		dev_err(dev, "Can't set altsetting 1.\n");
		ret = -EIO;
		goto fail_mem;
	}

	netdev = alloc_etherdev(sizeof(struct catc));
	if (!netdev)
		goto fail_mem;

	catc = netdev_priv(netdev);

	netdev->netdev_ops = &catc_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	netdev->ethtool_ops = &ops;

	catc->usbdev = usbdev;
	catc->netdev = netdev;

	spin_lock_init(&catc->tx_lock);
	spin_lock_init(&catc->ctrl_lock);

	timer_setup(&catc->timer, catc_stats_timer, 0);

	catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
	catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
	if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
	    (!catc->rx_urb) || (!catc->irq_urb)) {
		dev_err(&intf->dev, "No free urbs available.\n");
		ret = -ENOMEM;
		goto fail_free;
	}

	/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
	if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
	    le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
	    le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
		dev_dbg(dev, "Testing for f5u011\n");
		catc->is_f5u011 = 1;
		atomic_set(&catc->recq_sz, 0);
		pktsz = RX_PKT_SZ;
	} else {
		pktsz = RX_MAX_BURST * (PKT_SZ + 2);
	}

	usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
		NULL, NULL, 0, catc_ctrl_done, catc);

	usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1),
		NULL, 0, catc_tx_done, catc);

	usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1),
		catc->rx_buf, pktsz, catc_rx_done, catc);

	usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2),
		catc->irq_buf, 2, catc_irq_done, catc, 1);

	if (!catc->is_f5u011) {
		u32 *buf;
		int i;

		dev_dbg(dev, "Checking memory size\n");

		buf = kmalloc(4, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto fail_free;
		}

		*buf = 0x12345678;
		catc_write_mem(catc, 0x7a80, buf, 4);
		*buf = 0x87654321;
		catc_write_mem(catc, 0xfa80, buf, 4);
		catc_read_mem(catc, 0x7a80, buf, 4);

		switch (*buf) {
		case 0x12345678:
			catc_set_reg(catc, TxBufCount, 8);
			catc_set_reg(catc, RxBufCount, 32);
			dev_dbg(dev, "64k Memory\n");
			break;
		default:
			dev_warn(&intf->dev,
				 "Couldn't detect memory size, assuming 32k\n");
			fallthrough;
		case 0x87654321:
			catc_set_reg(catc, TxBufCount, 4);
			catc_set_reg(catc, RxBufCount, 16);
			dev_dbg(dev, "32k Memory\n");
			break;
		}

		kfree(buf);

		dev_dbg(dev, "Getting MAC from SEEROM.\n");

		catc_get_mac(catc, macbuf);
		eth_hw_addr_set(netdev, macbuf);

		dev_dbg(dev, "Setting MAC into registers.\n");

		for (i = 0; i < 6; i++)
			catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);

		dev_dbg(dev, "Filling the multicast list.\n");

		eth_broadcast_addr(broadcast);
		catc_multicast(broadcast, catc->multicast);
		catc_multicast(netdev->dev_addr, catc->multicast);
		catc_write_mem(catc, 0xfa80, catc->multicast, 64);

		dev_dbg(dev, "Clearing error counters.\n");

		for (i = 0; i < 8; i++)
			catc_set_reg(catc, EthStats + i, 0);
		catc->last_stats = jiffies;

		dev_dbg(dev, "Enabling.\n");

		catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
		catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
		catc_set_reg(catc, LEDCtrl, LEDLink);
		catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
	} else {
		dev_dbg(dev, "Performing reset\n");
		catc_reset(catc);
		catc_get_mac(catc, macbuf);
		eth_hw_addr_set(netdev, macbuf);

		dev_dbg(dev, "Setting RX Mode\n");
		catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
		catc->rxmode[1] = 0;
		f5u011_rxmode(catc, catc->rxmode);
	}
	dev_dbg(dev, "Init done.\n");
	printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
	       netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
	       usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
	usb_set_intfdata(intf, catc);

	SET_NETDEV_DEV(netdev, &intf->dev);
	ret = register_netdev(netdev);
	if (ret)
		goto fail_clear_intfdata;

	kfree(macbuf);
	return 0;

fail_clear_intfdata:
	usb_set_intfdata(intf, NULL);
fail_free:
	usb_free_urb(catc->ctrl_urb);
	usb_free_urb(catc->tx_urb);
	usb_free_urb(catc->rx_urb);
	usb_free_urb(catc->irq_urb);
	free_netdev(netdev);
fail_mem:
	kfree(macbuf);
error:
	return ret;
}

static void catc_disconnect(struct usb_interface *intf)
{
	struct catc *catc = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	if (catc) {
		unregister_netdev(catc->netdev);
		usb_free_urb(catc->ctrl_urb);
		usb_free_urb(catc->tx_urb);
		usb_free_urb(catc->rx_urb);
		usb_free_urb(catc->irq_urb);
		free_netdev(catc->netdev);
	}
}

/*
 * Module functions and tables.
 */

static const struct usb_device_id catc_id_table[] = {
	{ USB_DEVICE(0x0423, 0xa) },	/* CATC Netmate, Belkin F5U011 */
	{ USB_DEVICE(0x0423, 0xc) },	/* CATC Netmate II, Belkin F5U111 */
	{ USB_DEVICE(0x08d1, 0x1) },	/* smartBridges smartNIC */
	{ }
};

MODULE_DEVICE_TABLE(usb, catc_id_table);

static struct usb_driver catc_driver = {
	.name =		driver_name,
	.probe =	catc_probe,
	.disconnect =	catc_disconnect,
	.id_table =	catc_id_table,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(catc_driver);