1 /* 2 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack 3 * 4 * Copyright (C) 2003-2005,2008 David Brownell 5 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger 6 * Copyright (C) 2008 Nokia Corporation 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 */ 13 14 /* #define VERBOSE_DEBUG */ 15 16 #include <linux/kernel.h> 17 #include <linux/module.h> 18 #include <linux/gfp.h> 19 #include <linux/device.h> 20 #include <linux/ctype.h> 21 #include <linux/etherdevice.h> 22 #include <linux/ethtool.h> 23 #include <linux/if_vlan.h> 24 25 #include "u_ether.h" 26 27 28 /* 29 * This component encapsulates the Ethernet link glue needed to provide 30 * one (!) network link through the USB gadget stack, normally "usb0". 31 * 32 * The control and data models are handled by the function driver which 33 * connects to this code; such as CDC Ethernet (ECM or EEM), 34 * "CDC Subset", or RNDIS. That includes all descriptor and endpoint 35 * management. 36 * 37 * Link level addressing is handled by this component using module 38 * parameters; if no such parameters are provided, random link level 39 * addresses are used. Each end of the link uses one address. The 40 * host end address is exported in various ways, and is often recorded 41 * in configuration databases. 42 * 43 * The driver which assembles each configuration using such a link is 44 * responsible for ensuring that each configuration includes at most one 45 * instance of is network link. (The network layer provides ways for 46 * this single "physical" link to be used by multiple virtual links.) 47 */ 48 49 #define UETH__VERSION "29-May-2008" 50 51 struct eth_dev { 52 /* lock is held while accessing port_usb 53 */ 54 spinlock_t lock; 55 struct gether *port_usb; 56 57 struct net_device *net; 58 struct usb_gadget *gadget; 59 60 spinlock_t req_lock; /* guard {rx,tx}_reqs */ 61 struct list_head tx_reqs, rx_reqs; 62 atomic_t tx_qlen; 63 64 struct sk_buff_head rx_frames; 65 66 unsigned qmult; 67 68 unsigned header_len; 69 struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb); 70 int (*unwrap)(struct gether *, 71 struct sk_buff *skb, 72 struct sk_buff_head *list); 73 74 struct work_struct work; 75 76 unsigned long todo; 77 #define WORK_RX_MEMORY 0 78 79 bool zlp; 80 u8 host_mac[ETH_ALEN]; 81 u8 dev_mac[ETH_ALEN]; 82 }; 83 84 /*-------------------------------------------------------------------------*/ 85 86 #define RX_EXTRA 20 /* bytes guarding against rx overflows */ 87 88 #define DEFAULT_QLEN 2 /* double buffering by default */ 89 90 /* for dual-speed hardware, use deeper queues at high/super speed */ 91 static inline int qlen(struct usb_gadget *gadget, unsigned qmult) 92 { 93 if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || 94 gadget->speed == USB_SPEED_SUPER)) 95 return qmult * DEFAULT_QLEN; 96 else 97 return DEFAULT_QLEN; 98 } 99 100 /*-------------------------------------------------------------------------*/ 101 102 /* REVISIT there must be a better way than having two sets 103 * of debug calls ... 104 */ 105 106 #undef DBG 107 #undef VDBG 108 #undef ERROR 109 #undef INFO 110 111 #define xprintk(d, level, fmt, args...) \ 112 printk(level "%s: " fmt , (d)->net->name , ## args) 113 114 #ifdef DEBUG 115 #undef DEBUG 116 #define DBG(dev, fmt, args...) \ 117 xprintk(dev , KERN_DEBUG , fmt , ## args) 118 #else 119 #define DBG(dev, fmt, args...) \ 120 do { } while (0) 121 #endif /* DEBUG */ 122 123 #ifdef VERBOSE_DEBUG 124 #define VDBG DBG 125 #else 126 #define VDBG(dev, fmt, args...) \ 127 do { } while (0) 128 #endif /* DEBUG */ 129 130 #define ERROR(dev, fmt, args...) \ 131 xprintk(dev , KERN_ERR , fmt , ## args) 132 #define INFO(dev, fmt, args...) \ 133 xprintk(dev , KERN_INFO , fmt , ## args) 134 135 /*-------------------------------------------------------------------------*/ 136 137 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */ 138 139 static int ueth_change_mtu(struct net_device *net, int new_mtu) 140 { 141 struct eth_dev *dev = netdev_priv(net); 142 unsigned long flags; 143 int status = 0; 144 145 /* don't change MTU on "live" link (peer won't know) */ 146 spin_lock_irqsave(&dev->lock, flags); 147 if (dev->port_usb) 148 status = -EBUSY; 149 else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN) 150 status = -ERANGE; 151 else 152 net->mtu = new_mtu; 153 spin_unlock_irqrestore(&dev->lock, flags); 154 155 return status; 156 } 157 158 static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) 159 { 160 struct eth_dev *dev = netdev_priv(net); 161 162 strlcpy(p->driver, "g_ether", sizeof(p->driver)); 163 strlcpy(p->version, UETH__VERSION, sizeof(p->version)); 164 strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version)); 165 strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info)); 166 } 167 168 /* REVISIT can also support: 169 * - WOL (by tracking suspends and issuing remote wakeup) 170 * - msglevel (implies updated messaging) 171 * - ... probably more ethtool ops 172 */ 173 174 static const struct ethtool_ops ops = { 175 .get_drvinfo = eth_get_drvinfo, 176 .get_link = ethtool_op_get_link, 177 }; 178 179 static void defer_kevent(struct eth_dev *dev, int flag) 180 { 181 if (test_and_set_bit(flag, &dev->todo)) 182 return; 183 if (!schedule_work(&dev->work)) 184 ERROR(dev, "kevent %d may have been dropped\n", flag); 185 else 186 DBG(dev, "kevent %d scheduled\n", flag); 187 } 188 189 static void rx_complete(struct usb_ep *ep, struct usb_request *req); 190 191 static int 192 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) 193 { 194 struct sk_buff *skb; 195 int retval = -ENOMEM; 196 size_t size = 0; 197 struct usb_ep *out; 198 unsigned long flags; 199 200 spin_lock_irqsave(&dev->lock, flags); 201 if (dev->port_usb) 202 out = dev->port_usb->out_ep; 203 else 204 out = NULL; 205 spin_unlock_irqrestore(&dev->lock, flags); 206 207 if (!out) 208 return -ENOTCONN; 209 210 211 /* Padding up to RX_EXTRA handles minor disagreements with host. 212 * Normally we use the USB "terminate on short read" convention; 213 * so allow up to (N*maxpacket), since that memory is normally 214 * already allocated. Some hardware doesn't deal well with short 215 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a 216 * byte off the end (to force hardware errors on overflow). 217 * 218 * RNDIS uses internal framing, and explicitly allows senders to 219 * pad to end-of-packet. That's potentially nice for speed, but 220 * means receivers can't recover lost synch on their own (because 221 * new packets don't only start after a short RX). 222 */ 223 size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA; 224 size += dev->port_usb->header_len; 225 size += out->maxpacket - 1; 226 size -= size % out->maxpacket; 227 228 if (dev->port_usb->is_fixed) 229 size = max_t(size_t, size, dev->port_usb->fixed_out_len); 230 231 skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags); 232 if (skb == NULL) { 233 DBG(dev, "no rx skb\n"); 234 goto enomem; 235 } 236 237 /* Some platforms perform better when IP packets are aligned, 238 * but on at least one, checksumming fails otherwise. Note: 239 * RNDIS headers involve variable numbers of LE32 values. 240 */ 241 skb_reserve(skb, NET_IP_ALIGN); 242 243 req->buf = skb->data; 244 req->length = size; 245 req->complete = rx_complete; 246 req->context = skb; 247 248 retval = usb_ep_queue(out, req, gfp_flags); 249 if (retval == -ENOMEM) 250 enomem: 251 defer_kevent(dev, WORK_RX_MEMORY); 252 if (retval) { 253 DBG(dev, "rx submit --> %d\n", retval); 254 if (skb) 255 dev_kfree_skb_any(skb); 256 spin_lock_irqsave(&dev->req_lock, flags); 257 list_add(&req->list, &dev->rx_reqs); 258 spin_unlock_irqrestore(&dev->req_lock, flags); 259 } 260 return retval; 261 } 262 263 static void rx_complete(struct usb_ep *ep, struct usb_request *req) 264 { 265 struct sk_buff *skb = req->context, *skb2; 266 struct eth_dev *dev = ep->driver_data; 267 int status = req->status; 268 269 switch (status) { 270 271 /* normal completion */ 272 case 0: 273 skb_put(skb, req->actual); 274 275 if (dev->unwrap) { 276 unsigned long flags; 277 278 spin_lock_irqsave(&dev->lock, flags); 279 if (dev->port_usb) { 280 status = dev->unwrap(dev->port_usb, 281 skb, 282 &dev->rx_frames); 283 } else { 284 dev_kfree_skb_any(skb); 285 status = -ENOTCONN; 286 } 287 spin_unlock_irqrestore(&dev->lock, flags); 288 } else { 289 skb_queue_tail(&dev->rx_frames, skb); 290 } 291 skb = NULL; 292 293 skb2 = skb_dequeue(&dev->rx_frames); 294 while (skb2) { 295 if (status < 0 296 || ETH_HLEN > skb2->len 297 || skb2->len > VLAN_ETH_FRAME_LEN) { 298 dev->net->stats.rx_errors++; 299 dev->net->stats.rx_length_errors++; 300 DBG(dev, "rx length %d\n", skb2->len); 301 dev_kfree_skb_any(skb2); 302 goto next_frame; 303 } 304 skb2->protocol = eth_type_trans(skb2, dev->net); 305 dev->net->stats.rx_packets++; 306 dev->net->stats.rx_bytes += skb2->len; 307 308 /* no buffer copies needed, unless hardware can't 309 * use skb buffers. 310 */ 311 status = netif_rx(skb2); 312 next_frame: 313 skb2 = skb_dequeue(&dev->rx_frames); 314 } 315 break; 316 317 /* software-driven interface shutdown */ 318 case -ECONNRESET: /* unlink */ 319 case -ESHUTDOWN: /* disconnect etc */ 320 VDBG(dev, "rx shutdown, code %d\n", status); 321 goto quiesce; 322 323 /* for hardware automagic (such as pxa) */ 324 case -ECONNABORTED: /* endpoint reset */ 325 DBG(dev, "rx %s reset\n", ep->name); 326 defer_kevent(dev, WORK_RX_MEMORY); 327 quiesce: 328 dev_kfree_skb_any(skb); 329 goto clean; 330 331 /* data overrun */ 332 case -EOVERFLOW: 333 dev->net->stats.rx_over_errors++; 334 /* FALLTHROUGH */ 335 336 default: 337 dev->net->stats.rx_errors++; 338 DBG(dev, "rx status %d\n", status); 339 break; 340 } 341 342 if (skb) 343 dev_kfree_skb_any(skb); 344 if (!netif_running(dev->net)) { 345 clean: 346 spin_lock(&dev->req_lock); 347 list_add(&req->list, &dev->rx_reqs); 348 spin_unlock(&dev->req_lock); 349 req = NULL; 350 } 351 if (req) 352 rx_submit(dev, req, GFP_ATOMIC); 353 } 354 355 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n) 356 { 357 unsigned i; 358 struct usb_request *req; 359 360 if (!n) 361 return -ENOMEM; 362 363 /* queue/recycle up to N requests */ 364 i = n; 365 list_for_each_entry(req, list, list) { 366 if (i-- == 0) 367 goto extra; 368 } 369 while (i--) { 370 req = usb_ep_alloc_request(ep, GFP_ATOMIC); 371 if (!req) 372 return list_empty(list) ? -ENOMEM : 0; 373 list_add(&req->list, list); 374 } 375 return 0; 376 377 extra: 378 /* free extras */ 379 for (;;) { 380 struct list_head *next; 381 382 next = req->list.next; 383 list_del(&req->list); 384 usb_ep_free_request(ep, req); 385 386 if (next == list) 387 break; 388 389 req = container_of(next, struct usb_request, list); 390 } 391 return 0; 392 } 393 394 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n) 395 { 396 int status; 397 398 spin_lock(&dev->req_lock); 399 status = prealloc(&dev->tx_reqs, link->in_ep, n); 400 if (status < 0) 401 goto fail; 402 status = prealloc(&dev->rx_reqs, link->out_ep, n); 403 if (status < 0) 404 goto fail; 405 goto done; 406 fail: 407 DBG(dev, "can't alloc requests\n"); 408 done: 409 spin_unlock(&dev->req_lock); 410 return status; 411 } 412 413 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) 414 { 415 struct usb_request *req; 416 unsigned long flags; 417 418 /* fill unused rxq slots with some skb */ 419 spin_lock_irqsave(&dev->req_lock, flags); 420 while (!list_empty(&dev->rx_reqs)) { 421 req = container_of(dev->rx_reqs.next, 422 struct usb_request, list); 423 list_del_init(&req->list); 424 spin_unlock_irqrestore(&dev->req_lock, flags); 425 426 if (rx_submit(dev, req, gfp_flags) < 0) { 427 defer_kevent(dev, WORK_RX_MEMORY); 428 return; 429 } 430 431 spin_lock_irqsave(&dev->req_lock, flags); 432 } 433 spin_unlock_irqrestore(&dev->req_lock, flags); 434 } 435 436 static void eth_work(struct work_struct *work) 437 { 438 struct eth_dev *dev = container_of(work, struct eth_dev, work); 439 440 if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) { 441 if (netif_running(dev->net)) 442 rx_fill(dev, GFP_KERNEL); 443 } 444 445 if (dev->todo) 446 DBG(dev, "work done, flags = 0x%lx\n", dev->todo); 447 } 448 449 static void tx_complete(struct usb_ep *ep, struct usb_request *req) 450 { 451 struct sk_buff *skb = req->context; 452 struct eth_dev *dev = ep->driver_data; 453 454 switch (req->status) { 455 default: 456 dev->net->stats.tx_errors++; 457 VDBG(dev, "tx err %d\n", req->status); 458 /* FALLTHROUGH */ 459 case -ECONNRESET: /* unlink */ 460 case -ESHUTDOWN: /* disconnect etc */ 461 break; 462 case 0: 463 dev->net->stats.tx_bytes += skb->len; 464 } 465 dev->net->stats.tx_packets++; 466 467 spin_lock(&dev->req_lock); 468 list_add(&req->list, &dev->tx_reqs); 469 spin_unlock(&dev->req_lock); 470 dev_kfree_skb_any(skb); 471 472 atomic_dec(&dev->tx_qlen); 473 if (netif_carrier_ok(dev->net)) 474 netif_wake_queue(dev->net); 475 } 476 477 static inline int is_promisc(u16 cdc_filter) 478 { 479 return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS; 480 } 481 482 static netdev_tx_t eth_start_xmit(struct sk_buff *skb, 483 struct net_device *net) 484 { 485 struct eth_dev *dev = netdev_priv(net); 486 int length = 0; 487 int retval; 488 struct usb_request *req = NULL; 489 unsigned long flags; 490 struct usb_ep *in; 491 u16 cdc_filter; 492 493 spin_lock_irqsave(&dev->lock, flags); 494 if (dev->port_usb) { 495 in = dev->port_usb->in_ep; 496 cdc_filter = dev->port_usb->cdc_filter; 497 } else { 498 in = NULL; 499 cdc_filter = 0; 500 } 501 spin_unlock_irqrestore(&dev->lock, flags); 502 503 if (skb && !in) { 504 dev_kfree_skb_any(skb); 505 return NETDEV_TX_OK; 506 } 507 508 /* apply outgoing CDC or RNDIS filters */ 509 if (skb && !is_promisc(cdc_filter)) { 510 u8 *dest = skb->data; 511 512 if (is_multicast_ether_addr(dest)) { 513 u16 type; 514 515 /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host 516 * SET_ETHERNET_MULTICAST_FILTERS requests 517 */ 518 if (is_broadcast_ether_addr(dest)) 519 type = USB_CDC_PACKET_TYPE_BROADCAST; 520 else 521 type = USB_CDC_PACKET_TYPE_ALL_MULTICAST; 522 if (!(cdc_filter & type)) { 523 dev_kfree_skb_any(skb); 524 return NETDEV_TX_OK; 525 } 526 } 527 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */ 528 } 529 530 spin_lock_irqsave(&dev->req_lock, flags); 531 /* 532 * this freelist can be empty if an interrupt triggered disconnect() 533 * and reconfigured the gadget (shutting down this queue) after the 534 * network stack decided to xmit but before we got the spinlock. 535 */ 536 if (list_empty(&dev->tx_reqs)) { 537 spin_unlock_irqrestore(&dev->req_lock, flags); 538 return NETDEV_TX_BUSY; 539 } 540 541 req = container_of(dev->tx_reqs.next, struct usb_request, list); 542 list_del(&req->list); 543 544 /* temporarily stop TX queue when the freelist empties */ 545 if (list_empty(&dev->tx_reqs)) 546 netif_stop_queue(net); 547 spin_unlock_irqrestore(&dev->req_lock, flags); 548 549 /* no buffer copies needed, unless the network stack did it 550 * or the hardware can't use skb buffers. 551 * or there's not enough space for extra headers we need 552 */ 553 if (dev->wrap) { 554 unsigned long flags; 555 556 spin_lock_irqsave(&dev->lock, flags); 557 if (dev->port_usb) 558 skb = dev->wrap(dev->port_usb, skb); 559 spin_unlock_irqrestore(&dev->lock, flags); 560 if (!skb) { 561 /* Multi frame CDC protocols may store the frame for 562 * later which is not a dropped frame. 563 */ 564 if (dev->port_usb->supports_multi_frame) 565 goto multiframe; 566 goto drop; 567 } 568 } 569 570 length = skb->len; 571 req->buf = skb->data; 572 req->context = skb; 573 req->complete = tx_complete; 574 575 /* NCM requires no zlp if transfer is dwNtbInMaxSize */ 576 if (dev->port_usb->is_fixed && 577 length == dev->port_usb->fixed_in_len && 578 (length % in->maxpacket) == 0) 579 req->zero = 0; 580 else 581 req->zero = 1; 582 583 /* use zlp framing on tx for strict CDC-Ether conformance, 584 * though any robust network rx path ignores extra padding. 585 * and some hardware doesn't like to write zlps. 586 */ 587 if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) 588 length++; 589 590 req->length = length; 591 592 /* throttle high/super speed IRQ rate back slightly */ 593 if (gadget_is_dualspeed(dev->gadget)) 594 req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH || 595 dev->gadget->speed == USB_SPEED_SUPER) 596 ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) 597 : 0; 598 599 retval = usb_ep_queue(in, req, GFP_ATOMIC); 600 switch (retval) { 601 default: 602 DBG(dev, "tx queue err %d\n", retval); 603 break; 604 case 0: 605 net->trans_start = jiffies; 606 atomic_inc(&dev->tx_qlen); 607 } 608 609 if (retval) { 610 dev_kfree_skb_any(skb); 611 drop: 612 dev->net->stats.tx_dropped++; 613 multiframe: 614 spin_lock_irqsave(&dev->req_lock, flags); 615 if (list_empty(&dev->tx_reqs)) 616 netif_start_queue(net); 617 list_add(&req->list, &dev->tx_reqs); 618 spin_unlock_irqrestore(&dev->req_lock, flags); 619 } 620 return NETDEV_TX_OK; 621 } 622 623 /*-------------------------------------------------------------------------*/ 624 625 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags) 626 { 627 DBG(dev, "%s\n", __func__); 628 629 /* fill the rx queue */ 630 rx_fill(dev, gfp_flags); 631 632 /* and open the tx floodgates */ 633 atomic_set(&dev->tx_qlen, 0); 634 netif_wake_queue(dev->net); 635 } 636 637 static int eth_open(struct net_device *net) 638 { 639 struct eth_dev *dev = netdev_priv(net); 640 struct gether *link; 641 642 DBG(dev, "%s\n", __func__); 643 if (netif_carrier_ok(dev->net)) 644 eth_start(dev, GFP_KERNEL); 645 646 spin_lock_irq(&dev->lock); 647 link = dev->port_usb; 648 if (link && link->open) 649 link->open(link); 650 spin_unlock_irq(&dev->lock); 651 652 return 0; 653 } 654 655 static int eth_stop(struct net_device *net) 656 { 657 struct eth_dev *dev = netdev_priv(net); 658 unsigned long flags; 659 660 VDBG(dev, "%s\n", __func__); 661 netif_stop_queue(net); 662 663 DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", 664 dev->net->stats.rx_packets, dev->net->stats.tx_packets, 665 dev->net->stats.rx_errors, dev->net->stats.tx_errors 666 ); 667 668 /* ensure there are no more active requests */ 669 spin_lock_irqsave(&dev->lock, flags); 670 if (dev->port_usb) { 671 struct gether *link = dev->port_usb; 672 const struct usb_endpoint_descriptor *in; 673 const struct usb_endpoint_descriptor *out; 674 675 if (link->close) 676 link->close(link); 677 678 /* NOTE: we have no abort-queue primitive we could use 679 * to cancel all pending I/O. Instead, we disable then 680 * reenable the endpoints ... this idiom may leave toggle 681 * wrong, but that's a self-correcting error. 682 * 683 * REVISIT: we *COULD* just let the transfers complete at 684 * their own pace; the network stack can handle old packets. 685 * For the moment we leave this here, since it works. 686 */ 687 in = link->in_ep->desc; 688 out = link->out_ep->desc; 689 usb_ep_disable(link->in_ep); 690 usb_ep_disable(link->out_ep); 691 if (netif_carrier_ok(net)) { 692 DBG(dev, "host still using in/out endpoints\n"); 693 link->in_ep->desc = in; 694 link->out_ep->desc = out; 695 usb_ep_enable(link->in_ep); 696 usb_ep_enable(link->out_ep); 697 } 698 } 699 spin_unlock_irqrestore(&dev->lock, flags); 700 701 return 0; 702 } 703 704 /*-------------------------------------------------------------------------*/ 705 706 static int get_ether_addr(const char *str, u8 *dev_addr) 707 { 708 if (str) { 709 unsigned i; 710 711 for (i = 0; i < 6; i++) { 712 unsigned char num; 713 714 if ((*str == '.') || (*str == ':')) 715 str++; 716 num = hex_to_bin(*str++) << 4; 717 num |= hex_to_bin(*str++); 718 dev_addr [i] = num; 719 } 720 if (is_valid_ether_addr(dev_addr)) 721 return 0; 722 } 723 eth_random_addr(dev_addr); 724 return 1; 725 } 726 727 static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len) 728 { 729 if (len < 18) 730 return -EINVAL; 731 732 snprintf(str, len, "%pM", dev_addr); 733 return 18; 734 } 735 736 static const struct net_device_ops eth_netdev_ops = { 737 .ndo_open = eth_open, 738 .ndo_stop = eth_stop, 739 .ndo_start_xmit = eth_start_xmit, 740 .ndo_change_mtu = ueth_change_mtu, 741 .ndo_set_mac_address = eth_mac_addr, 742 .ndo_validate_addr = eth_validate_addr, 743 }; 744 745 static struct device_type gadget_type = { 746 .name = "gadget", 747 }; 748 749 /** 750 * gether_setup_name - initialize one ethernet-over-usb link 751 * @g: gadget to associated with these links 752 * @ethaddr: NULL, or a buffer in which the ethernet address of the 753 * host side of the link is recorded 754 * @netname: name for network device (for example, "usb") 755 * Context: may sleep 756 * 757 * This sets up the single network link that may be exported by a 758 * gadget driver using this framework. The link layer addresses are 759 * set up using module parameters. 760 * 761 * Returns an eth_dev pointer on success, or an ERR_PTR on failure. 762 */ 763 struct eth_dev *gether_setup_name(struct usb_gadget *g, 764 const char *dev_addr, const char *host_addr, 765 u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname) 766 { 767 struct eth_dev *dev; 768 struct net_device *net; 769 int status; 770 771 net = alloc_etherdev(sizeof *dev); 772 if (!net) 773 return ERR_PTR(-ENOMEM); 774 775 dev = netdev_priv(net); 776 spin_lock_init(&dev->lock); 777 spin_lock_init(&dev->req_lock); 778 INIT_WORK(&dev->work, eth_work); 779 INIT_LIST_HEAD(&dev->tx_reqs); 780 INIT_LIST_HEAD(&dev->rx_reqs); 781 782 skb_queue_head_init(&dev->rx_frames); 783 784 /* network device setup */ 785 dev->net = net; 786 dev->qmult = qmult; 787 snprintf(net->name, sizeof(net->name), "%s%%d", netname); 788 789 if (get_ether_addr(dev_addr, net->dev_addr)) 790 dev_warn(&g->dev, 791 "using random %s ethernet address\n", "self"); 792 if (get_ether_addr(host_addr, dev->host_mac)) 793 dev_warn(&g->dev, 794 "using random %s ethernet address\n", "host"); 795 796 if (ethaddr) 797 memcpy(ethaddr, dev->host_mac, ETH_ALEN); 798 799 net->netdev_ops = ð_netdev_ops; 800 801 net->ethtool_ops = &ops; 802 803 dev->gadget = g; 804 SET_NETDEV_DEV(net, &g->dev); 805 SET_NETDEV_DEVTYPE(net, &gadget_type); 806 807 status = register_netdev(net); 808 if (status < 0) { 809 dev_dbg(&g->dev, "register_netdev failed, %d\n", status); 810 free_netdev(net); 811 dev = ERR_PTR(status); 812 } else { 813 INFO(dev, "MAC %pM\n", net->dev_addr); 814 INFO(dev, "HOST MAC %pM\n", dev->host_mac); 815 816 /* 817 * two kinds of host-initiated state changes: 818 * - iff DATA transfer is active, carrier is "on" 819 * - tx queueing enabled if open *and* carrier is "on" 820 */ 821 netif_carrier_off(net); 822 } 823 824 return dev; 825 } 826 EXPORT_SYMBOL_GPL(gether_setup_name); 827 828 struct net_device *gether_setup_name_default(const char *netname) 829 { 830 struct net_device *net; 831 struct eth_dev *dev; 832 833 net = alloc_etherdev(sizeof(*dev)); 834 if (!net) 835 return ERR_PTR(-ENOMEM); 836 837 dev = netdev_priv(net); 838 spin_lock_init(&dev->lock); 839 spin_lock_init(&dev->req_lock); 840 INIT_WORK(&dev->work, eth_work); 841 INIT_LIST_HEAD(&dev->tx_reqs); 842 INIT_LIST_HEAD(&dev->rx_reqs); 843 844 skb_queue_head_init(&dev->rx_frames); 845 846 /* network device setup */ 847 dev->net = net; 848 dev->qmult = QMULT_DEFAULT; 849 snprintf(net->name, sizeof(net->name), "%s%%d", netname); 850 851 eth_random_addr(dev->dev_mac); 852 pr_warn("using random %s ethernet address\n", "self"); 853 eth_random_addr(dev->host_mac); 854 pr_warn("using random %s ethernet address\n", "host"); 855 856 net->netdev_ops = ð_netdev_ops; 857 858 net->ethtool_ops = &ops; 859 SET_NETDEV_DEVTYPE(net, &gadget_type); 860 861 return net; 862 } 863 EXPORT_SYMBOL_GPL(gether_setup_name_default); 864 865 int gether_register_netdev(struct net_device *net) 866 { 867 struct eth_dev *dev; 868 struct usb_gadget *g; 869 struct sockaddr sa; 870 int status; 871 872 if (!net->dev.parent) 873 return -EINVAL; 874 dev = netdev_priv(net); 875 g = dev->gadget; 876 status = register_netdev(net); 877 if (status < 0) { 878 dev_dbg(&g->dev, "register_netdev failed, %d\n", status); 879 return status; 880 } else { 881 INFO(dev, "HOST MAC %pM\n", dev->host_mac); 882 883 /* two kinds of host-initiated state changes: 884 * - iff DATA transfer is active, carrier is "on" 885 * - tx queueing enabled if open *and* carrier is "on" 886 */ 887 netif_carrier_off(net); 888 } 889 sa.sa_family = net->type; 890 memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN); 891 rtnl_lock(); 892 status = dev_set_mac_address(net, &sa); 893 rtnl_unlock(); 894 if (status) 895 pr_warn("cannot set self ethernet address: %d\n", status); 896 else 897 INFO(dev, "MAC %pM\n", dev->dev_mac); 898 899 return status; 900 } 901 EXPORT_SYMBOL_GPL(gether_register_netdev); 902 903 void gether_set_gadget(struct net_device *net, struct usb_gadget *g) 904 { 905 struct eth_dev *dev; 906 907 dev = netdev_priv(net); 908 dev->gadget = g; 909 SET_NETDEV_DEV(net, &g->dev); 910 } 911 EXPORT_SYMBOL_GPL(gether_set_gadget); 912 913 int gether_set_dev_addr(struct net_device *net, const char *dev_addr) 914 { 915 struct eth_dev *dev; 916 u8 new_addr[ETH_ALEN]; 917 918 dev = netdev_priv(net); 919 if (get_ether_addr(dev_addr, new_addr)) 920 return -EINVAL; 921 memcpy(dev->dev_mac, new_addr, ETH_ALEN); 922 return 0; 923 } 924 EXPORT_SYMBOL_GPL(gether_set_dev_addr); 925 926 int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len) 927 { 928 struct eth_dev *dev; 929 930 dev = netdev_priv(net); 931 return get_ether_addr_str(dev->dev_mac, dev_addr, len); 932 } 933 EXPORT_SYMBOL_GPL(gether_get_dev_addr); 934 935 int gether_set_host_addr(struct net_device *net, const char *host_addr) 936 { 937 struct eth_dev *dev; 938 u8 new_addr[ETH_ALEN]; 939 940 dev = netdev_priv(net); 941 if (get_ether_addr(host_addr, new_addr)) 942 return -EINVAL; 943 memcpy(dev->host_mac, new_addr, ETH_ALEN); 944 return 0; 945 } 946 EXPORT_SYMBOL_GPL(gether_set_host_addr); 947 948 int gether_get_host_addr(struct net_device *net, char *host_addr, int len) 949 { 950 struct eth_dev *dev; 951 952 dev = netdev_priv(net); 953 return get_ether_addr_str(dev->host_mac, host_addr, len); 954 } 955 EXPORT_SYMBOL_GPL(gether_get_host_addr); 956 957 int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len) 958 { 959 struct eth_dev *dev; 960 961 if (len < 13) 962 return -EINVAL; 963 964 dev = netdev_priv(net); 965 snprintf(host_addr, len, "%pm", dev->host_mac); 966 967 return strlen(host_addr); 968 } 969 EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc); 970 971 void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN]) 972 { 973 struct eth_dev *dev; 974 975 dev = netdev_priv(net); 976 memcpy(host_mac, dev->host_mac, ETH_ALEN); 977 } 978 EXPORT_SYMBOL_GPL(gether_get_host_addr_u8); 979 980 void gether_set_qmult(struct net_device *net, unsigned qmult) 981 { 982 struct eth_dev *dev; 983 984 dev = netdev_priv(net); 985 dev->qmult = qmult; 986 } 987 EXPORT_SYMBOL_GPL(gether_set_qmult); 988 989 unsigned gether_get_qmult(struct net_device *net) 990 { 991 struct eth_dev *dev; 992 993 dev = netdev_priv(net); 994 return dev->qmult; 995 } 996 EXPORT_SYMBOL_GPL(gether_get_qmult); 997 998 int gether_get_ifname(struct net_device *net, char *name, int len) 999 { 1000 rtnl_lock(); 1001 strlcpy(name, netdev_name(net), len); 1002 rtnl_unlock(); 1003 return strlen(name); 1004 } 1005 EXPORT_SYMBOL_GPL(gether_get_ifname); 1006 1007 /** 1008 * gether_cleanup - remove Ethernet-over-USB device 1009 * Context: may sleep 1010 * 1011 * This is called to free all resources allocated by @gether_setup(). 1012 */ 1013 void gether_cleanup(struct eth_dev *dev) 1014 { 1015 if (!dev) 1016 return; 1017 1018 unregister_netdev(dev->net); 1019 flush_work(&dev->work); 1020 free_netdev(dev->net); 1021 } 1022 EXPORT_SYMBOL_GPL(gether_cleanup); 1023 1024 /** 1025 * gether_connect - notify network layer that USB link is active 1026 * @link: the USB link, set up with endpoints, descriptors matching 1027 * current device speed, and any framing wrapper(s) set up. 1028 * Context: irqs blocked 1029 * 1030 * This is called to activate endpoints and let the network layer know 1031 * the connection is active ("carrier detect"). It may cause the I/O 1032 * queues to open and start letting network packets flow, but will in 1033 * any case activate the endpoints so that they respond properly to the 1034 * USB host. 1035 * 1036 * Verify net_device pointer returned using IS_ERR(). If it doesn't 1037 * indicate some error code (negative errno), ep->driver_data values 1038 * have been overwritten. 1039 */ 1040 struct net_device *gether_connect(struct gether *link) 1041 { 1042 struct eth_dev *dev = link->ioport; 1043 int result = 0; 1044 1045 if (!dev) 1046 return ERR_PTR(-EINVAL); 1047 1048 link->in_ep->driver_data = dev; 1049 result = usb_ep_enable(link->in_ep); 1050 if (result != 0) { 1051 DBG(dev, "enable %s --> %d\n", 1052 link->in_ep->name, result); 1053 goto fail0; 1054 } 1055 1056 link->out_ep->driver_data = dev; 1057 result = usb_ep_enable(link->out_ep); 1058 if (result != 0) { 1059 DBG(dev, "enable %s --> %d\n", 1060 link->out_ep->name, result); 1061 goto fail1; 1062 } 1063 1064 if (result == 0) 1065 result = alloc_requests(dev, link, qlen(dev->gadget, 1066 dev->qmult)); 1067 1068 if (result == 0) { 1069 dev->zlp = link->is_zlp_ok; 1070 DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult)); 1071 1072 dev->header_len = link->header_len; 1073 dev->unwrap = link->unwrap; 1074 dev->wrap = link->wrap; 1075 1076 spin_lock(&dev->lock); 1077 dev->port_usb = link; 1078 if (netif_running(dev->net)) { 1079 if (link->open) 1080 link->open(link); 1081 } else { 1082 if (link->close) 1083 link->close(link); 1084 } 1085 spin_unlock(&dev->lock); 1086 1087 netif_carrier_on(dev->net); 1088 if (netif_running(dev->net)) 1089 eth_start(dev, GFP_ATOMIC); 1090 1091 /* on error, disable any endpoints */ 1092 } else { 1093 (void) usb_ep_disable(link->out_ep); 1094 fail1: 1095 (void) usb_ep_disable(link->in_ep); 1096 } 1097 fail0: 1098 /* caller is responsible for cleanup on error */ 1099 if (result < 0) 1100 return ERR_PTR(result); 1101 return dev->net; 1102 } 1103 EXPORT_SYMBOL_GPL(gether_connect); 1104 1105 /** 1106 * gether_disconnect - notify network layer that USB link is inactive 1107 * @link: the USB link, on which gether_connect() was called 1108 * Context: irqs blocked 1109 * 1110 * This is called to deactivate endpoints and let the network layer know 1111 * the connection went inactive ("no carrier"). 1112 * 1113 * On return, the state is as if gether_connect() had never been called. 1114 * The endpoints are inactive, and accordingly without active USB I/O. 1115 * Pointers to endpoint descriptors and endpoint private data are nulled. 1116 */ 1117 void gether_disconnect(struct gether *link) 1118 { 1119 struct eth_dev *dev = link->ioport; 1120 struct usb_request *req; 1121 1122 WARN_ON(!dev); 1123 if (!dev) 1124 return; 1125 1126 DBG(dev, "%s\n", __func__); 1127 1128 netif_stop_queue(dev->net); 1129 netif_carrier_off(dev->net); 1130 1131 /* disable endpoints, forcing (synchronous) completion 1132 * of all pending i/o. then free the request objects 1133 * and forget about the endpoints. 1134 */ 1135 usb_ep_disable(link->in_ep); 1136 spin_lock(&dev->req_lock); 1137 while (!list_empty(&dev->tx_reqs)) { 1138 req = container_of(dev->tx_reqs.next, 1139 struct usb_request, list); 1140 list_del(&req->list); 1141 1142 spin_unlock(&dev->req_lock); 1143 usb_ep_free_request(link->in_ep, req); 1144 spin_lock(&dev->req_lock); 1145 } 1146 spin_unlock(&dev->req_lock); 1147 link->in_ep->driver_data = NULL; 1148 link->in_ep->desc = NULL; 1149 1150 usb_ep_disable(link->out_ep); 1151 spin_lock(&dev->req_lock); 1152 while (!list_empty(&dev->rx_reqs)) { 1153 req = container_of(dev->rx_reqs.next, 1154 struct usb_request, list); 1155 list_del(&req->list); 1156 1157 spin_unlock(&dev->req_lock); 1158 usb_ep_free_request(link->out_ep, req); 1159 spin_lock(&dev->req_lock); 1160 } 1161 spin_unlock(&dev->req_lock); 1162 link->out_ep->driver_data = NULL; 1163 link->out_ep->desc = NULL; 1164 1165 /* finish forgetting about this USB link episode */ 1166 dev->header_len = 0; 1167 dev->unwrap = NULL; 1168 dev->wrap = NULL; 1169 1170 spin_lock(&dev->lock); 1171 dev->port_usb = NULL; 1172 spin_unlock(&dev->lock); 1173 } 1174 EXPORT_SYMBOL_GPL(gether_disconnect); 1175 1176 MODULE_LICENSE("GPL"); 1177 MODULE_AUTHOR("David Brownell"); 1178