/*
 * USB Network driver infrastructure
 * Copyright (C) 2000-2005 by David Brownell
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * This is a generic "USB networking" framework that works with several
 * kinds of full and high speed networking devices: host-to-host cables,
 * smart usb peripherals, and actual Ethernet adapters.
 *
 * These devices usually differ in terms of control protocols (if they
 * even have one!) and sometimes they define new framing to wrap or batch
 * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
 * so interface (un)binding, endpoint I/O queues, fault handling, and other
 * issues can usefully be addressed by this framework.
 */

// #define	DEBUG			// error path messages, extra info
// #define	VERBOSE			// more; success messages

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>

#define DRIVER_VERSION		"22-Aug-2005"


/*-------------------------------------------------------------------------*/

/*
 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
 * Several dozen bytes of IPv4 data can fit in two such transactions.
 * One maximum size Ethernet packet takes twenty four of them.
 * For high speed, each frame comfortably fits almost 36 max size
 * Ethernet packets (so queues should be bigger).
 *
 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
 * let the USB host controller be busy for 5msec or more before an irq
 * is required, under load.  Jumbograms change the equation.
 */
#define	RX_MAX_QUEUE_MEMORY (60 * 1518)
#define	RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
			(RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
#define	TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
			(RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)

// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES	(5*HZ)

// throttle rx/tx briefly after some faults, so khubd might disconnect()
// us (it polls at HZ/4 usually) before we report too many false errors.
#define THROTTLE_JIFFIES	(HZ/8)

// between wakeups
#define UNLINK_TIMEOUT_MS	3

/*-------------------------------------------------------------------------*/

// randomly generated ethernet address
static u8	node_id [ETH_ALEN];

static const char driver_name [] = "usbnet";

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");

/*-------------------------------------------------------------------------*/

/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	if (alt->desc.bAlternateSetting != 0 ||
	    !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	dev->in = usb_rcvbulkpipe (dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe (dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);

int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
	int		tmp, i;
	unsigned char	buf [13];

	tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
	if (tmp != 12) {
		dev_dbg(&dev->udev->dev,
			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
		if (tmp >= 0)
			tmp = -EINVAL;
		return tmp;
	}
	for (i = tmp = 0; i < 6; i++, tmp += 2)
		dev->net->dev_addr [i] =
			(hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);

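/* Illustrative sketch (not part of this framework): a typical minidriver
 * bind() might use the two helpers above roughly like this.  The function
 * name and the MAC-address string descriptor index (3) are hypothetical;
 * real minidrivers take the index from their own descriptors.
 *
 *	static int example_bind(struct usbnet *dev, struct usb_interface *intf)
 *	{
 *		int status = usbnet_get_endpoints(dev, intf);
 *
 *		if (status < 0)
 *			return status;
 *		return usbnet_get_ethernet_addr(dev, 3);
 *	}
 */
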
static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	/* success */
	case 0:
		dev->driver_info->status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running (dev->net))
		return;

	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}

static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
	char		*buf = NULL;
	unsigned	pipe = 0;
	unsigned	maxp;
	unsigned	period;

	if (!dev->driver_info->status)
		return 0;

	pipe = usb_rcvintpipe (dev->udev,
			dev->status->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);
	maxp = usb_maxpacket (dev->udev, pipe, 0);

	/* avoid 1 msec chatter:  min 8 msec poll rate */
	period = max ((int) dev->status->desc.bInterval,
		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);

	buf = kmalloc (maxp, GFP_KERNEL);
	if (buf) {
		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
		if (!dev->interrupt) {
			kfree (buf);
			return -ENOMEM;
		} else {
			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
				buf, maxp, intr_complete, dev, period);
			dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
			dev_dbg(&intf->dev,
				"status ep%din, %d bytes period %d\n",
				usb_pipeendpoint(pipe), maxp, period);
		}
	}
	return 0;
}

/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);

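/* Illustrative sketch (not from this file) of the rx_fixup() contract that
 * usbnet_skb_return() supports.  The 2-byte pad is a purely hypothetical
 * framing scheme: returning 0 makes rx_process() count an rx error, while a
 * nonzero return lets rx_process() hand the (now trimmed) skb to
 * usbnet_skb_return().  Drivers with FLAG_MULTI_PACKET instead clone each
 * embedded frame and call usbnet_skb_return() on the clones themselves.
 *
 *	static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 *	{
 *		// hypothetical framing: device prepends a 2-byte pad
 *		if (skb->len < 2)
 *			return 0;
 *		skb_pull(skb, 2);
 *		return 1;
 *	}
 */
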
/*-------------------------------------------------------------------------
 *
 * Network Device Driver (peer link to "Host Device", from USB host)
 *
 *-------------------------------------------------------------------------*/

int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	if (new_mtu <= 0)
		return -EINVAL;
	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size)
			usbnet_unlink_rx_urbs(dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);

/* The caller must hold list->lock */
static void __usbnet_queue_skb(struct sk_buff_head *list,
			struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *) newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}

/*-------------------------------------------------------------------------*/

/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
 * completion callbacks.  2.5 should have fixed those bugs...
 */

static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
		struct sk_buff_head *list, enum skb_state state)
{
	unsigned long		flags;
	enum skb_state		old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
	return old_state;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	if (!schedule_work (&dev->kevent)) {
		if (net_ratelimit())
			netdev_err(dev->net, "kevent %d may have been dropped\n", work);
	} else {
		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
	}
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb);

static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}


/*-------------------------------------------------------------------------*/

static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup &&
	    !dev->driver_info->rx_fixup (dev, skb)) {
		/* With RX_ASSEMBLE, rx_fixup() must update counters */
		if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
			dev->net->stats.rx_errors++;
		goto done;
	}
	// else network stack removes extra byte if we forced a short packet

	if (skb->len) {
		/* all data was already cloned from skb inside the driver */
		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
			dev_kfree_skb_any(skb);
		else
			usbnet_skb_return(dev, skb);
		return;
	}

	netif_dbg(dev, rx_err, dev->net, "drop\n");
	dev->net->stats.rx_errors++;
done:
	skb_queue_tail(&dev->done, skb);
}

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			usb_mark_last_busy(dev->udev);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}

/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
	set_bit(EVENT_RX_PAUSED, &dev->flags);

	netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
}
EXPORT_SYMBOL_GPL(usbnet_pause_rx);

void usbnet_resume_rx(struct usbnet *dev)
{
	struct sk_buff *skb;
	int num = 0;

	clear_bit(EVENT_RX_PAUSED, &dev->flags);

	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
		usbnet_skb_return(dev, skb);
		num++;
	}

	tasklet_schedule(&dev->bh);

	netif_dbg(dev, rx_status, dev->net,
		  "paused rx queue disabled, %d skbs requeued\n", num);
}
EXPORT_SYMBOL_GPL(usbnet_resume_rx);

void usbnet_purge_paused_rxq(struct usbnet *dev)
{
	skb_queue_purge(&dev->rxq_pause);
}
EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);

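/* Illustrative usage (hypothetical, not from this file): a minidriver that
 * must reconfigure its receive path can hold incoming packets on the pause
 * queue while it works, then release them in order:
 *
 *	usbnet_pause_rx(dev);
 *	// ... reprogram rx filters, framing, etc. ...
 *	usbnet_resume_rx(dev);
 *
 * If the queued packets are no longer wanted afterwards,
 * usbnet_purge_paused_rxq(dev) discards them instead.
 */
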
/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Take a reference on the URB so it can't be freed while
		 * usb_unlink_urb() runs; usb_unlink_urb() always races
		 * with the .complete handler (including defer_bh), which
		 * could otherwise trigger a use-after-free.
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}

// Flush all pending rx urbs
// minidrivers may need to do this when the MTU changes

void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
	if (netif_running(dev->net)) {
		(void) unlink_urbs (dev, &dev->rxq);
		tasklet_schedule(&dev->bh);
	}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) +
		unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq)
		&& !skb_queue_empty(&dev->txq)
		&& !skb_queue_empty(&dev->done)) {
			schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
			set_current_state(TASK_UNINTERRUPTIBLE);
			netif_dbg(dev, ifdown, dev->net,
				  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}

int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	struct driver_info	*info = dev->driver_info;
	int			retval;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue (net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0)
			netif_info(dev, ifdown, dev->net,
				   "stop fail (%d) usbnet usb-%s-%s, %s\n",
				   retval,
				   dev->udev->bus->bus_name, dev->udev->devpath,
				   info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
		usbnet_terminate_urbs(dev);

	usb_kill_urb(dev->interrupt);

	usbnet_purge_paused_rxq(dev);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync (&dev->delay);
	tasklet_kill (&dev->bh);
	if (info->manage_power &&
	    !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
		info->manage_power(dev, 0);
	else
		usb_autopm_put_interface(dev->intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_stop);

/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	struct driver_info	*info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0) {
			retval = 0;
			set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
	return retval;
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_open);

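/* Illustrative sketch (hypothetical accessor names): for the generic
 * MII-based ethtool and link helpers below to work, a minidriver's bind()
 * is expected to fill in dev->mii with its own MDIO accessors, roughly:
 *
 *	dev->mii.dev = dev->net;
 *	dev->mii.mdio_read = example_mdio_read;
 *	dev->mii.mdio_write = example_mdio_write;
 *	dev->mii.phy_id_mask = 0x3f;
 *	dev->mii.reg_num_mask = 0x1f;
 *	dev->mii.phy_id = example_phy_id;
 *
 * Minidrivers that don't set these get -EOPNOTSUPP from the MII-based
 * operations, and usbnet_get_link() falls back to ethtool_op_get_link().
 */
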
/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */

int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_read)
		return -EOPNOTSUPP;

	return mii_ethtool_gset(&dev->mii, cmd);
}
EXPORT_SYMBOL_GPL(usbnet_get_settings);

int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);
	int retval;

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	retval = mii_ethtool_sset(&dev->mii, cmd);

	/* link speed/duplex might have changed */
	if (dev->driver_info->link_reset)
		dev->driver_info->link_reset(dev);

	return retval;

}
EXPORT_SYMBOL_GPL(usbnet_set_settings);

u32 usbnet_get_link (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	/* If a check_connect is defined, return its result */
	if (dev->driver_info->check_connect)
		return dev->driver_info->check_connect (dev) == 0;

	/* if the device has mii operations, use those */
	if (dev->mii.mdio_read)
		return mii_link_ok(&dev->mii);

	/* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
	return ethtool_op_get_link(net);
}
EXPORT_SYMBOL_GPL(usbnet_get_link);

int usbnet_nway_reset(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	return mii_nway_restart(&dev->mii);
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);

void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
	struct usbnet *dev = netdev_priv(net);

	strlcpy (info->driver, dev->driver_name, sizeof info->driver);
	strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
	strlcpy (info->fw_version, dev->driver_info->description,
		sizeof info->fw_version);
	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);

u32 usbnet_get_msglevel (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return dev->msg_enable;
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);

void usbnet_set_msglevel (struct net_device *net, u32 level)
{
	struct usbnet *dev = netdev_priv(net);

	dev->msg_enable = level;
}
EXPORT_SYMBOL_GPL(usbnet_set_msglevel);

/* drivers may override default ethtool_ops in their bind() routine */
static const struct ethtool_ops usbnet_ethtool_ops = {
	.get_settings		= usbnet_get_settings,
	.set_settings		= usbnet_set_settings,
	.get_link		= usbnet_get_link,
	.nway_reset		= usbnet_nway_reset,
	.get_drvinfo		= usbnet_get_drvinfo,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/*-------------------------------------------------------------------------*/

/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}

/*-------------------------------------------------------------------------*/

static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
			dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			usb_mark_last_busy(dev->udev);
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				netif_dbg(dev, link, dev->net,
					  "tx throttle %d\n", urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);
	(void) defer_bh(dev, skb, &dev->txq, tx_done);
}

/*-------------------------------------------------------------------------*/

void usbnet_tx_timeout (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);

	// FIXME: device recovery -- reset?
}
EXPORT_SYMBOL_GPL(usbnet_tx_timeout);

/*-------------------------------------------------------------------------*/

netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	if (skb)
		skb_tx_timestamp(skb);

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev)) {
				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
				goto drop;
			} else {
				/* cdc_ncm collected packet; waits for more */
				goto not_drop;
			}
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				urb->transfer_buffer_length++;
				if (skb_tailroom(skb)) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				}
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__usbnet_queue_skb(&dev->txq, skb, tx_start);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);

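/* Illustrative sketch (hypothetical framing, not from this file) of the
 * tx_fixup() hook used by usbnet_start_xmit() above: the minidriver may
 * reshape the skb before it is mapped into the bulk-out URB, for example
 * to prepend a 2-byte little-endian length header.
 *
 *	static struct sk_buff *example_tx_fixup(struct usbnet *dev,
 *						struct sk_buff *skb, gfp_t flags)
 *	{
 *		if (skb_cow_head(skb, 2)) {
 *			dev_kfree_skb_any(skb);
 *			return NULL;	// start_xmit drops (or defers) it
 *		}
 *		__skb_push(skb, 2);
 *		put_unaligned_le16(skb->len - 2, skb->data);
 *		return skb;
 *	}
 */
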
static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
{
	struct urb	*urb;
	int		i;
	int		ret = 0;

	/* don't refill the queue all at once */
	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
		urb = usb_alloc_urb(0, flags);
		if (urb != NULL) {
			ret = rx_submit(dev, urb, flags);
			if (ret)
				goto err;
		} else {
			ret = -ENOMEM;
			goto err;
		}
	}
err:
	return ret;
}

/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void usbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   !timer_pending (&dev->delay) &&
		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;

		if (temp < RX_QLEN(dev)) {
			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
				return;
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			if (dev->rxq.qlen < RX_QLEN(dev))
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

void usbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
		   intf->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	cancel_work_sync(&dev->kevent);

	usb_scuttle_anchored_urbs(&dev->deferred);

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	usb_kill_urb(dev->interrupt);
	usb_free_urb(dev->interrupt);

	free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);

static const struct net_device_ops usbnet_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static struct device_type wlan_type = {
	.name	= "wlan",
};

static struct device_type wwan_type = {
	.name	= "wwan",
};

int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	struct usb_driver	*driver = to_usb_driver(udev->dev.driver);

	/* usbnet already took usb runtime pm, so have to enable the feature
	 * for usb interface, otherwise usb_autopm_get_interface may return
	 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
	 */
	if (!driver->supports_autosuspend) {
		driver->supports_autosuspend = 1;
		pm_runtime_enable(&udev->dev);
	}

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		goto out;

	/* netdev_printk() needs this so do it as early as possible */
	SET_NETDEV_DEV(net, &udev->dev);

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	dev->bh.func = usbnet_bh;
	dev->bh.data = (unsigned long) dev;
	INIT_WORK (&dev->kevent, kevent);
	init_usb_anchor(&dev->deferred);
	dev->delay.function = usbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
	mutex_init (&dev->phy_mutex);

	dev->net = net;
	strcpy (net->name, "usb%d");
	memcpy (net->dev_addr, node_id, sizeof node_id);

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
#if 0
// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
		     (net->dev_addr [0] & 0x02) == 0))
			strcpy (net->name, "eth%d");
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strcpy(net->name, "wlan%d");
		/* WWAN devices should always be named "wwan%d" */
		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
			strcpy(net->name, "wwan%d");

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;

	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wlan_type);
	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wwan_type);

	status = register_netdev (net);
	if (status)
		goto out4;
	netif_info(dev, probe, dev->net,
		   "register '%s' at usb-%s-%s, %s, %pM\n",
		   udev->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description,
		   net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	netif_device_attach (net);

	if (dev->driver_info->flags & FLAG_LINK_INTR)
		netif_carrier_off(net);

	return 0;

out4:
	usb_free_urb(dev->interrupt);
out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);

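/* Illustrative sketch (all names and IDs hypothetical) of the glue a
 * minidriver typically supplies so that usbnet_probe() and the other
 * generic entry points drive its hardware:
 *
 *	static const struct driver_info example_info = {
 *		.description	= "Example USB Ethernet",
 *		.flags		= FLAG_ETHER,
 *		.bind		= example_bind,
 *		.rx_fixup	= example_rx_fixup,
 *		.tx_fixup	= example_tx_fixup,
 *	};
 *
 *	static const struct usb_device_id example_ids[] = {
 *		{ USB_DEVICE(0x1234, 0x5678),
 *		  .driver_info = (unsigned long) &example_info },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(usb, example_ids);
 *
 *	static struct usb_driver example_driver = {
 *		.name		= "example",
 *		.id_table	= example_ids,
 *		.probe		= usbnet_probe,
 *		.disconnect	= usbnet_disconnect,
 *		.suspend	= usbnet_suspend,
 *		.resume		= usbnet_resume,
 *	};
 *	module_usb_driver(example_driver);
 */
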
/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
			dev->suspend_count--;
			spin_unlock_irq(&dev->txq.lock);
			return -EBUSY;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		usbnet_terminate_urbs(dev);
		usb_kill_urb(dev->interrupt);

		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);

int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff		*skb;
	struct urb		*res;
	int			retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->interrupt, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* handle remote wakeup ASAP */
			if (!dev->wait &&
				netif_device_present(dev->net) &&
				!timer_pending(&dev->delay) &&
				!test_bit(EVENT_RX_HALT, &dev->flags))
					rx_alloc_submit(dev, GFP_NOIO);

			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_tx_wake_all_queues(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
		usb_autopm_get_interface_no_resume(intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);

/*
 * Either a subdriver implements manage_power, in which case it is assumed
 * to always be ready to be suspended, or it reports the readiness to be
 * suspended explicitly.
 */
void usbnet_device_suggests_idle(struct usbnet *dev)
{
	if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
		dev->intf->needs_remote_wakeup = 1;
		usb_autopm_put_interface_async(dev->intf);
	}
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);

/*
 * For devices that can do without special commands
 */
int usbnet_manage_power(struct usbnet *dev, int on)
{
	dev->intf->needs_remote_wakeup = on;
	return 0;
}
EXPORT_SYMBOL(usbnet_manage_power);

/*-------------------------------------------------------------------------*/
static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			     u16 value, u16 index, void *data, u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (data) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf)
			goto out;
	}

	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_GET_TIMEOUT);
	if (err > 0 && err <= size)
		memcpy(data, buf, err);
	kfree(buf);
out:
	return err;
}

static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			      u16 value, u16 index, const void *data,
			      u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (data) {
		buf = kmemdup(data, size, GFP_KERNEL);
		if (!buf)
			goto out;
	}

	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_SET_TIMEOUT);
	kfree(buf);

out:
	return err;
}

/*
 * This function must not be called from a suspend/resume callback,
 * otherwise it will deadlock.
 */
int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		    u16 value, u16 index, void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd);

/*
 * This function must not be called from a suspend/resume callback,
 * otherwise it will deadlock.
 */
int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		     u16 value, u16 index, const void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				 data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd);

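/* Illustrative usage (hypothetical request and register names): a minidriver
 * typically wraps the helpers above for its vendor control requests, e.g.
 * reading a 16-bit register over the default control pipe:
 *
 *	__le16 val;
 *	int err;
 *
 *	err = usbnet_read_cmd(dev, EXAMPLE_REQ_READ_REG,
 *			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 *			      0, reg, &val, sizeof(val));
 *	if (err < 0)
 *		return err;
 *
 * The _nopm variants below do the same transfer without touching runtime PM,
 * which is why they are reserved for suspend/resume paths.
 */
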
/*
 * This function can safely be called inside a suspend/resume callback;
 * in general it should only be called from there.
 */
int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			  u16 value, u16 index, void *data, u16 size)
{
	return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				 data, size);
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);

/*
 * This function can safely be called inside a suspend/resume callback;
 * in general it should only be called from there.
 */
int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			  u16 value, u16 index, const void *data,
			  u16 size)
{
	return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				  data, size);
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);

static void usbnet_async_cmd_cb(struct urb *urb)
{
	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
	int status = urb->status;

	if (status < 0)
		dev_dbg(&urb->dev->dev, "%s failed with %d",
			__func__, status);

	kfree(req);
	usb_free_urb(urb);
}

/*
 * The caller must make sure that device can't be put into suspend
 * state until the control URB completes.
 */
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
			   u16 value, u16 index, const void *data, u16 size)
{
	struct usb_ctrlrequest *req = NULL;
	struct urb *urb;
	int err = -ENOMEM;
	void *buf = NULL;

	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(dev->net, "Error allocating URB in"
			   " %s!\n", __func__);
		goto fail;
	}

	if (data) {
		buf = kmemdup(data, size, GFP_ATOMIC);
		if (!buf) {
			netdev_err(dev->net, "Error allocating buffer"
				   " in %s!\n", __func__);
			goto fail_free;
		}
	}

	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
	if (!req) {
		netdev_err(dev->net, "Failed to allocate memory for %s\n",
			   __func__);
		goto fail_free_buf;
	}

	req->bRequestType = reqtype;
	req->bRequest = cmd;
	req->wValue = cpu_to_le16(value);
	req->wIndex = cpu_to_le16(index);
	req->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     (void *)req, buf, size,
			     usbnet_async_cmd_cb, req);
	urb->transfer_flags |= URB_FREE_BUFFER;

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		netdev_err(dev->net, "Error submitting the control"
			   " message: status=%d\n", err);
		goto fail_free;
	}
	return 0;

fail_free_buf:
	kfree(buf);
fail_free:
	kfree(req);
	usb_free_urb(urb);
fail:
	return err;

}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
/*-------------------------------------------------------------------------*/

static int __init usbnet_init(void)
{
	/* Compiler should optimize this out. */
	BUILD_BUG_ON(
		FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));

	eth_random_addr(node_id);
	return 0;
}
module_init(usbnet_init);

static void __exit usbnet_exit(void)
{
}
module_exit(usbnet_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB network driver framework");
MODULE_LICENSE("GPL");