/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		qmult;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}

/*-------------------------------------------------------------------------*/

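/* Queue depth example (illustrative): with a qmult of 5, a dual-speed
 * controller running at high or super speed gets 5 * DEFAULT_QLEN = 10
 * requests allocated per direction, while at full speed it stays at 2.
 */
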
/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

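/* RX path overview: rx_submit() posts one OUT request per freshly
 * allocated skb, and rx_complete() either unwraps and feeds the resulting
 * frames to the network stack or recycles the request onto dev->rx_reqs.
 * If an skb can't be allocated in atomic context, WORK_RX_MEMORY defers
 * the refill to eth_work().
 */
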
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

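/* Sizing example (illustrative): a plain CDC Ethernet link with a 1500
 * byte MTU and no extra framing header needs 14 + 1500 + 20 = 1534 bytes;
 * rounding up to the 512 byte high speed bulk maxpacket gives a 1536 byte
 * (3 * 512) buffer per rx request.
 */
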
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > VLAN_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

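/* eth_work() runs in process context, so the deferred rx refill below may
 * use GFP_KERNEL.  The atomic-context callers (rx_complete(), and eth_start()
 * when invoked from gether_connect()) pass GFP_ATOMIC and fall back to
 * scheduling WORK_RX_MEMORY when allocation fails.
 */
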
static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (skb && !in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

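	/* Filtering example (illustrative): if the host enabled only
	 * DIRECTED | BROADCAST, an outgoing IPv4 multicast frame
	 * (destination 01:00:5e:xx:xx:xx) fails the ALL_MULTICAST check
	 * above and is consumed here rather than queued to the host.
	 */
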
	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers,
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding,
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

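/* Framing example (illustrative): at high speed, with a 512 byte bulk
 * maxpacket, a 1024 byte wrapped frame is an exact multiple of maxpacket,
 * so either the controller appends a ZLP (req->zero with is_zlp_ok) or,
 * when it can't, a single pad byte is added and 1025 bytes go out; the
 * host's rx path is expected to ignore the padding.  The no_interrupt
 * throttle above likewise arranges for only about one in every qmult tx
 * completions to raise an IRQ at high/super speed.
 */
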

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%02x:%02x:%02x:%02x:%02x:%02x",
		 dev_addr[0], dev_addr[1], dev_addr[2],
		 dev_addr[3], dev_addr[4], dev_addr[5]);
	return 18;
}

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

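/* Typical lifecycle as driven by a function driver (illustrative sketch;
 * the names "port" and "gadget" and the calling context are assumptions,
 * not code from this file):
 *
 *	dev = gether_setup_name(gadget, dev_addr, host_addr,
 *				host_mac, qmult, "usb");   at bind time
 *	port.ioport = dev;          struct gether embedded in the function
 *	...
 *	net = gether_connect(&port);    from set_alt(), with endpoints and
 *	                                descriptors already chosen
 *	gether_disconnect(&port);       when the data interface goes down
 *	gether_cleanup(dev);            at unbind time
 */
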
/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @dev_addr: text form of the MAC address for the device ("self") side,
 *	or NULL/invalid to use a random address
 * @host_addr: text form of the MAC address for the host side, or
 *	NULL/invalid to use a random address
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier used at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);
	pr_warn("using random %s ethernet address\n", "self");
	eth_random_addr(dev->host_mac);
	pr_warn("using random %s ethernet address\n", "host");

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

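/* The *_default() variant above defers registration: callers are expected
 * (illustrative note, not code from this file) to call gether_set_gadget()
 * and then gether_register_netdev() once a gadget is available, since
 * gether_register_netdev() refuses a net_device whose parent device has
 * not been set.
 */
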
int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	struct sockaddr sa;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;
	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}
	sa.sa_family = net->type;
	memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
	rtnl_lock();
	status = dev_set_mac_address(net, &sa);
	rtnl_unlock();
	if (status)
		pr_warn("cannot set self ethernet address: %d\n", status);
	else
		INFO(dev, "MAC %pM\n", dev->dev_mac);

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return get_ether_addr_str(dev->dev_mac, dev_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return get_ether_addr_str(dev->host_mac, host_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	rtnl_lock();
	strlcpy(name, netdev_name(net), len);
	rtnl_unlock();
	return strlen(name);
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * @dev: the eth_dev to remove, as returned by gether_setup_name()
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the returned net_device pointer using IS_ERR().  Unless it
 * holds an error code (negative errno), the ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");