// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/hex.h>
#include <linux/if_vlan.h>
#include <linux/string_helpers.h>
#include <linux/usb/composite.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling.
 */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)

/* Per-link state behind the single ethernet-over-usb net_device. */
struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;	/* active USB link, or NULL when disconnected */

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;	/* free usb_request pools */
	atomic_t		tx_qlen;	/* TX requests currently queued to the UDC */

	struct sk_buff_head	rx_frames;	/* unwrapped frames awaiting netif_rx() */

	unsigned		qmult;		/* queue depth multiplier (see qlen()) */

	unsigned		header_len;	/* extra framing bytes, copied from the link */
	/* optional per-protocol framing hooks (e.g. RNDIS, EEM) */
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
					  struct sk_buff *skb,
					  struct sk_buff_head *list);

	struct work_struct	work;		/* runs eth_work() for deferred events */

	unsigned long		todo;		/* bitmask of deferred work flags */
#define	WORK_RX_MEMORY		0		/* retry rx_fill() after an allocation failure */

	bool			zlp;		/* link allows zero-length packet framing */
	bool			no_skb_reserve;	/* gadget prefers buffers without NET_IP_ALIGN */
	bool			ifname_set;	/* interface name set via gether_set_ifname() */
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget->speed == USB_SPEED_HIGH || gadget->speed >= USB_SPEED_SUPER)
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

/* ethtool GDRVINFO: report driver name/version and, when bound, the
 * UDC name and bus info from the gadget.
 */
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strscpy(p->driver, "g_ether", sizeof(p->driver));
	strscpy(p->version, UETH__VERSION, sizeof(p->version));
	if (dev->gadget) {
		strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
		strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
	}
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

/* Schedule eth_work() for @flag unless that flag is already pending. */
static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

/* Allocate an skb sized for one frame (plus framing/alignment slack)
 * and queue @req on the OUT endpoint.  On failure the request is
 * returned to dev->rx_reqs; -ENOMEM additionally defers a refill via
 * WORK_RX_MEMORY.  Returns 0 or a negative errno.
 */
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out)
	{
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	/* the enomem label sits inside this branch so the skb-allocation
	 * failure path above shares the deferred-refill handling
	 */
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

/* OUT endpoint completion (irq context): unwrap the received buffer,
 * push valid frames to the network stack, then recycle or resubmit the
 * request depending on link/interface state.
 */
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		/* ownership moved to rx_frames (or was freed above) */
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

/* Grow or shrink @list until it holds exactly @n usb_requests for @ep.
 * Returns 0 on success; -ENOMEM only if nothing could be allocated at
 * all (a partially filled list is reported as success).  Caller holds
 * dev->req_lock.
 */
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

/* Size both request freelists for the link's endpoints. */
static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

/* Submit every free rx request; a failed submit defers a retry via
 * WORK_RX_MEMORY and stops early (rx_submit re-queued the request).
 */
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

/* Workqueue handler for events deferred by defer_kevent(). */
static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

/* IN endpoint completion (irq context): account the transfer, free the
 * skb, recycle the request, and wake the tx queue when carrier is on.
 */
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		fallthrough;
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

/* Issue a remote wakeup, per-function when the function is suspended,
 * otherwise device-wide.  Returns the usb core's result.
 */
static int ether_wakeup_host(struct gether *port)
{
	int			ret;
	struct usb_function	*func = &port->func;
	struct usb_gadget	*gadget = func->config->cdev->gadget;

	if (func->func_suspended)
		ret = usb_func_wakeup(func);
	else
		ret = usb_gadget_wakeup(gadget);

	return ret;
}

/* ndo_start_xmit: apply CDC packet filters, optionally wrap the frame,
 * then queue it on the IN endpoint.  Returns NETDEV_TX_BUSY only when
 * the link is suspended (after triggering wakeup) or the tx freelist
 * raced empty; all other paths consume the skb and return NETDEV_TX_OK.
 */
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}

	if (dev->port_usb && dev->port_usb->is_suspend) {
		DBG(dev, "Port suspended. Triggering wakeup\n");
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->lock, flags);
		ether_wakeup_host(dev->port_usb);
		return NETDEV_TX_BUSY;
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		if (skb)
			dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

/* Prime the rx queue and enable transmission. */
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

/* ndo_open: start I/O if the link already has carrier, and notify the
 * function driver via its open() callback.
 */
static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

/* ndo_stop: quiesce I/O by bouncing the endpoints, then re-enable them
 * if the host is still using the link (carrier on).
 */
static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* Parse "xx:xx:xx:xx:xx:xx" (or '.'-separated) into @dev_addr.
 * Returns 0 if a valid address was parsed; otherwise stores a random
 * address and returns 1.
 */
static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}

/* Format @dev_addr as a colon-separated MAC string; needs room for the
 * 17 characters plus NUL.  Returns 18 or -EINVAL.
 */
static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static const struct device_type gadget_type = {
	.name	= "gadget",
};
/*
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @dev_addr: NULL, or "xx:xx:xx:xx:xx:xx" address for the device end
 * @host_addr: NULL, or the same style address for the host end
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;
	u8			addr[ETH_ALEN];

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, addr)) {
		net->addr_assign_type = NET_ADDR_RANDOM;
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	} else {
		net->addr_assign_type = NET_ADDR_SET;
	}
	eth_hw_addr_set(net, addr);
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

/* Like gether_setup_name(), but with default qmult and random MACs;
 * registration is deferred to gether_register_netdev().  Returns the
 * net_device or an ERR_PTR.
 */
struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);

	/* by default we always have a random MAC address */
	net->addr_assign_type = NET_ADDR_RANDOM;

	eth_random_addr(dev->host_mac);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

/* Register a net_device created by gether_setup_name_default(); the
 * gadget must already be attached (net->dev.parent set).
 */
int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;

	eth_hw_addr_set(net, dev->dev_mac);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
		INFO(dev, "MAC %pM\n", dev->dev_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

/* Reparent the net_device under the gadget and record the gadget. */
int gether_attach_gadget(struct net_device *net, struct usb_gadget *g)
{
	int ret;

	ret = device_move(&net->dev, &g->dev, DPM_ORDER_DEV_AFTER_PARENT);
	if (ret)
		return ret;

	gether_set_gadget(net, g);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_attach_gadget);

void gether_detach_gadget(struct net_device *net)
{
	struct eth_dev *dev = netdev_priv(net);

	device_move(&net->dev, NULL, DPM_ORDER_NONE);
	dev->gadget = NULL;
}
EXPORT_SYMBOL_GPL(gether_detach_gadget);

/* Set the device-end MAC from a string; -EINVAL if it doesn't parse to
 * a valid unicast address.
 */
int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	net->addr_assign_type = NET_ADDR_SET;
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

/* Write the device-end MAC into @dev_addr, newline-terminated when room
 * permits (sysfs-show style).  Returns the string length or -EINVAL.
 */
int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	if (ret + 1 < len) {
		dev_addr[ret++] = '\n';
		dev_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

/* Set the host-end MAC from a string; -EINVAL if it doesn't parse. */
int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

/* Write the host-end MAC into @host_addr, newline-terminated when room
 * permits.  Returns the string length or -EINVAL.
 */
int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	if (ret + 1 < len) {
		host_addr[ret++] = '\n';
		host_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

/* Host MAC formatted for the CDC iMACAddress string: twelve uppercase
 * hex digits, no separators ("%pm" prints without colons).
 */
int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	string_upper(host_addr, host_addr);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

/* Copy the interface name (template if set via gether_set_ifname(),
 * otherwise the registered name) into @name with a trailing newline.
 * Returns the number of characters written.
 */
int gether_get_ifname(struct net_device *net, char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	rtnl_lock();
	ret = scnprintf(name, len, "%s\n",
			dev->ifname_set ? net->name : netdev_name(net));
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

/* Set the interface name template; it must be a valid netdev name and
 * contain exactly one "%d".  Returns 0, -E2BIG, or -EINVAL.
 */
int gether_set_ifname(struct net_device *net, const char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	char tmp[IFNAMSIZ];
	const char *p;

	/* tolerate a trailing newline from sysfs writes */
	if (name[len - 1] == '\n')
		len--;

	if (len >= sizeof(tmp))
		return -E2BIG;

	strscpy(tmp, name, len + 1);
	if (!dev_valid_name(tmp))
		return -EINVAL;

	/* Require exactly one %d, so binding will not fail with EEXIST. */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	strscpy(net->name, tmp);
	dev->ifname_set = true;

	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_ifname);

/* Mark the link suspended; if TX is in flight, first try a remote
 * wakeup and skip the suspend when it succeeds.
 */
void gether_suspend(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	unsigned long flags;

	if (!dev)
		return;

	if (atomic_read(&dev->tx_qlen)) {
		/*
		 * There is a transfer in progress. So we trigger a remote
		 * wakeup to inform the host.
		 */
		if (!ether_wakeup_host(dev->port_usb))
			return;
	}
	spin_lock_irqsave(&dev->lock, flags);
	link->is_suspend = true;
	spin_unlock_irqrestore(&dev->lock, flags);
}
EXPORT_SYMBOL_GPL(gether_suspend);

/* Clear the suspend flag and restart the tx queue if it was stopped. */
void gether_resume(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	unsigned long flags;

	if (!dev)
		return;

	if (netif_queue_stopped(dev->net))
		netif_start_queue(dev->net);

	spin_lock_irqsave(&dev->lock, flags);
	link->is_suspend = false;
	spin_unlock_irqrestore(&dev->lock, flags);
}
EXPORT_SYMBOL_GPL(gether_resume);

/*
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->is_suspend = false;
	spin_unlock(&dev->lock);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_DESCRIPTION("Ethernet-over-USB link layer utilities for Gadget stack");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");