/*
 * USB Network driver infrastructure
 * Copyright (C) 2000-2005 by David Brownell
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * This is a generic "USB networking" framework that works with several
 * kinds of full and high speed networking devices:  host-to-host cables,
 * smart usb peripherals, and actual Ethernet adapters.
 *
 * These devices usually differ in terms of control protocols (if they
 * even have one!) and sometimes they define new framing to wrap or batch
 * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
 * so interface (un)binding, endpoint I/O queues, fault handling, and other
 * issues can usefully be addressed by this framework.
 */

// #define	DEBUG			// error path messages, extra info
// #define	VERBOSE			// more; success messages

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>

#define DRIVER_VERSION		"22-Aug-2005"


/*-------------------------------------------------------------------------*/

/*
 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
 * Several dozen bytes of IPv4 data can fit in two such transactions.
 * One maximum size Ethernet packet takes twenty four of them.
 * For high speed, each frame comfortably fits almost 36 max size
 * Ethernet packets (so queues should be bigger).
 *
 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
 * let the USB host controller be busy for 5msec or more before an irq
 * is required, under load.  Jumbograms change the equation.
 */
#define	RX_MAX_QUEUE_MEMORY	(60 * 1518)
#define	RX_QLEN(dev)		(((dev)->udev->speed == USB_SPEED_HIGH) ? \
				 (RX_MAX_QUEUE_MEMORY / (dev)->rx_urb_size) : 4)
#define	TX_QLEN(dev)		(((dev)->udev->speed == USB_SPEED_HIGH) ? \
				 (RX_MAX_QUEUE_MEMORY / (dev)->hard_mtu) : 4)
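/* Example: with the default 1514-byte hard_mtu (1500-byte MTU plus the
 * 14-byte Ethernet header) and the matching default rx_urb_size, both
 * macros evaluate to 60 queued URBs at high speed, and to 4 at full speed.
 */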
// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES	(5*HZ)

// throttle rx/tx briefly after some faults, so khubd might disconnect()
// us (it polls at HZ/4 usually) before we report too many false errors.
#define THROTTLE_JIFFIES	(HZ/8)

// between wakeups
#define UNLINK_TIMEOUT_MS	3

/*-------------------------------------------------------------------------*/

// randomly generated ethernet address
static u8	node_id [ETH_ALEN];

static const char driver_name [] = "usbnet";

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");

/*-------------------------------------------------------------------------*/

/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	if (alt->desc.bAlternateSetting != 0 ||
	    !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	dev->in = usb_rcvbulkpipe (dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe (dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);

int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
	int 		tmp, i;
	unsigned char	buf [13];

	tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
	if (tmp != 12) {
		dev_dbg(&dev->udev->dev,
			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
		if (tmp >= 0)
			tmp = -EINVAL;
		return tmp;
	}
	for (i = tmp = 0; i < 6; i++, tmp += 2)
		dev->net->dev_addr [i] =
			(hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);

static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	/* success */
	case 0:
		dev->driver_info->status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running (dev->net))
		return;

	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}

static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
	char		*buf = NULL;
	unsigned	pipe = 0;
	unsigned	maxp;
	unsigned	period;

	if (!dev->driver_info->status)
		return 0;

	pipe = usb_rcvintpipe (dev->udev,
			dev->status->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);
	maxp = usb_maxpacket (dev->udev, pipe, 0);

	/* avoid 1 msec chatter:  min 8 msec poll rate */
	period = max ((int) dev->status->desc.bInterval,
		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);

	buf = kmalloc (maxp, GFP_KERNEL);
	if (buf) {
		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
		if (!dev->interrupt) {
			kfree (buf);
			return -ENOMEM;
		} else {
			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
				buf, maxp, intr_complete, dev, period);
			dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
			dev_dbg(&intf->dev,
				"status ep%din, %d bytes period %d\n",
				usb_pipeendpoint(pipe), maxp, period);
		}
	}
	return 0;
}

/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);


/*-------------------------------------------------------------------------
 *
 * Network Device Driver (peer link to "Host Device", from USB host)
 *
 *-------------------------------------------------------------------------*/

int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	if (new_mtu <= 0)
		return -EINVAL;
	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size)
			usbnet_unlink_rx_urbs(dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);

/* The caller must hold list->lock */
static void __usbnet_queue_skb(struct sk_buff_head *list,
			struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *) newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}

/*-------------------------------------------------------------------------*/

/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
 * completion callbacks.  2.5 should have fixed those bugs...
 */
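/* Move a completed skb from its active queue (rxq or txq) to dev->done,
 * recording the new state, and kick the bottom half when the done list
 * goes from empty to non-empty.  Returns the state the skb was in before.
 */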
static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
		struct sk_buff_head *list, enum skb_state state)
{
	unsigned long		flags;
	enum skb_state		old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
	return old_state;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	if (!schedule_work (&dev->kevent)) {
		if (net_ratelimit())
			netdev_err(dev->net, "kevent %d may have been dropped\n", work);
	} else {
		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
	}
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb);

static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}


/*-------------------------------------------------------------------------*/
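/* Let the minidriver's rx_fixup() strip any link-layer framing; a non-empty
 * skb then goes up the stack via usbnet_skb_return() (or is freed when the
 * minidriver already passed up clones), while an empty or rejected skb is
 * counted as an rx error and queued on dev->done for cleanup.
 */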
static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup &&
	    !dev->driver_info->rx_fixup (dev, skb)) {
		/* With RX_ASSEMBLE, rx_fixup() must update counters */
		if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
			dev->net->stats.rx_errors++;
		goto done;
	}
	// else network stack removes extra byte if we forced a short packet

	if (skb->len) {
		/* all data was already cloned from skb inside the driver */
		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
			dev_kfree_skb_any(skb);
		else
			usbnet_skb_return(dev, skb);
		return;
	}

	netif_dbg(dev, rx_err, dev->net, "drop\n");
	dev->net->stats.rx_errors++;
done:
	skb_queue_tail(&dev->done, skb);
}

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			usb_mark_last_busy(dev->udev);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}

/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
	set_bit(EVENT_RX_PAUSED, &dev->flags);

	netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
}
EXPORT_SYMBOL_GPL(usbnet_pause_rx);

void usbnet_resume_rx(struct usbnet *dev)
{
	struct sk_buff *skb;
	int num = 0;

	clear_bit(EVENT_RX_PAUSED, &dev->flags);

	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
		usbnet_skb_return(dev, skb);
		num++;
	}

	tasklet_schedule(&dev->bh);

	netif_dbg(dev, rx_status, dev->net,
		  "paused rx queue disabled, %d skbs requeued\n", num);
}
EXPORT_SYMBOL_GPL(usbnet_resume_rx);

void usbnet_purge_paused_rxq(struct usbnet *dev)
{
	skb_queue_purge(&dev->rxq_pause);
}
EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Take a reference on the URB so it cannot be freed
		 * while usb_unlink_urb() runs; usb_unlink_urb() always
		 * races with the .complete handler (including defer_bh),
		 * which could otherwise trigger a use-after-free.
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}

// Flush all pending rx urbs
// minidrivers may need to do this when the MTU changes

void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
	if (netif_running(dev->net)) {
		(void) unlink_urbs (dev, &dev->rxq);
		tasklet_schedule(&dev->bh);
	}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) +
		unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq)
		&& !skb_queue_empty(&dev->txq)
		&& !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}

int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	struct driver_info	*info = dev->driver_info;
	int			retval;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue (net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0)
			netif_info(dev, ifdown, dev->net,
				   "stop fail (%d) usbnet usb-%s-%s, %s\n",
				   retval,
				   dev->udev->bus->bus_name, dev->udev->devpath,
				   info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
		usbnet_terminate_urbs(dev);

	usb_kill_urb(dev->interrupt);

	usbnet_purge_paused_rxq(dev);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync (&dev->delay);
	tasklet_kill (&dev->bh);
	if (info->manage_power)
		info->manage_power(dev, 0);
	else
		usb_autopm_put_interface(dev->intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_stop);

/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	struct driver_info	*info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0)
			goto done_manage_power_error;
		usb_autopm_put_interface(dev->intf);
	}
	return retval;

done_manage_power_error:
	clear_bit(EVENT_DEV_OPEN, &dev->flags);
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_open);

/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */
int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_read)
		return -EOPNOTSUPP;

	return mii_ethtool_gset(&dev->mii, cmd);
}
EXPORT_SYMBOL_GPL(usbnet_get_settings);

int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);
	int retval;

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	retval = mii_ethtool_sset(&dev->mii, cmd);

	/* link speed/duplex might have changed */
	if (dev->driver_info->link_reset)
		dev->driver_info->link_reset(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_set_settings);

u32 usbnet_get_link (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	/* If a check_connect is defined, return its result */
	if (dev->driver_info->check_connect)
		return dev->driver_info->check_connect (dev) == 0;

	/* if the device has mii operations, use those */
	if (dev->mii.mdio_read)
		return mii_link_ok(&dev->mii);

	/* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
	return ethtool_op_get_link(net);
}
EXPORT_SYMBOL_GPL(usbnet_get_link);

int usbnet_nway_reset(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	return mii_nway_restart(&dev->mii);
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);

void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
	struct usbnet *dev = netdev_priv(net);

	strlcpy (info->driver, dev->driver_name, sizeof info->driver);
	strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
	strlcpy (info->fw_version, dev->driver_info->description,
		sizeof info->fw_version);
	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);

u32 usbnet_get_msglevel (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return dev->msg_enable;
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);

void usbnet_set_msglevel (struct net_device *net, u32 level)
{
	struct usbnet *dev = netdev_priv(net);

	dev->msg_enable = level;
}
EXPORT_SYMBOL_GPL(usbnet_set_msglevel);

/* drivers may override default ethtool_ops in their bind() routine */
static const struct ethtool_ops usbnet_ethtool_ops = {
	.get_settings		= usbnet_get_settings,
	.set_settings		= usbnet_set_settings,
	.get_link		= usbnet_get_link,
	.nway_reset		= usbnet_nway_reset,
	.get_drvinfo		= usbnet_get_drvinfo,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/*-------------------------------------------------------------------------*/

/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
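/* One work item handles all deferred events: it clears endpoint halts
 * (EVENT_TX_HALT / EVENT_RX_HALT), retries rx allocation after memory
 * pressure (EVENT_RX_MEMORY), and runs the minidriver's link_reset()
 * (EVENT_LINK_RESET), taking a usb autopm reference around each step.
 */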
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}

/*-------------------------------------------------------------------------*/
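/* Completion handler for transmit URBs: updates tx byte/packet counters on
 * success, schedules halt clearing or throttling on errors, drops the autopm
 * reference taken in usbnet_start_xmit(), and defers skb/urb cleanup to the
 * bottom half via defer_bh().
 */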
static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
			dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			usb_mark_last_busy(dev->udev);
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				netif_dbg(dev, link, dev->net,
					  "tx throttle %d\n", urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);
	(void) defer_bh(dev, skb, &dev->txq, tx_done);
}

/*-------------------------------------------------------------------------*/

void usbnet_tx_timeout (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);

	// FIXME: device recovery -- reset?
}
EXPORT_SYMBOL_GPL(usbnet_tx_timeout);

/*-------------------------------------------------------------------------*/

netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	if (skb)
		skb_tx_timestamp(skb);

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev)) {
				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
				goto drop;
			} else {
				/* cdc_ncm collected packet; waits for more */
				goto not_drop;
			}
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				urb->transfer_buffer_length++;
				if (skb_tailroom(skb)) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				}
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__usbnet_queue_skb(&dev->txq, skb, tx_start);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);

static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
{
	struct urb	*urb;
	int		i;
	int		ret = 0;

	/* don't refill the queue all at once */
	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
		urb = usb_alloc_urb(0, flags);
		if (urb != NULL) {
			ret = rx_submit(dev, urb, flags);
			if (ret)
				goto err;
		} else {
			ret = -ENOMEM;
			goto err;
		}
	}
err:
	return ret;
}

/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer
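// Bottom half: reaps the done list (processing received skbs and freeing
// completed urbs), wakes usbnet_terminate_urbs() once all queues drain,
// refills the rx queue, and wakes the tx queue when it has room again.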
static void usbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   !timer_pending (&dev->delay) &&
		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;

		if (temp < RX_QLEN(dev)) {
			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
				return;
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			if (dev->rxq.qlen < RX_QLEN(dev))
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

void usbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
		   intf->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	cancel_work_sync(&dev->kevent);

	usb_scuttle_anchored_urbs(&dev->deferred);

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	usb_kill_urb(dev->interrupt);
	usb_free_urb(dev->interrupt);

	free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);

static const struct net_device_ops usbnet_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static struct device_type wlan_type = {
	.name	= "wlan",
};

static struct device_type wwan_type = {
	.name	= "wwan",
};

int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	struct usb_driver		*driver = to_usb_driver(udev->dev.driver);

	/* usbnet already took usb runtime pm, so we have to enable the feature
	 * for the usb interface, otherwise usb_autopm_get_interface may return
	 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
	 */
	if (!driver->supports_autosuspend) {
		driver->supports_autosuspend = 1;
		pm_runtime_enable(&udev->dev);
	}

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		goto out;

	/* netdev_printk() needs this so do it as early as possible */
	SET_NETDEV_DEV(net, &udev->dev);

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	dev->bh.func = usbnet_bh;
	dev->bh.data = (unsigned long) dev;
	INIT_WORK (&dev->kevent, kevent);
	init_usb_anchor(&dev->deferred);
	dev->delay.function = usbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
	mutex_init (&dev->phy_mutex);

	dev->net = net;
	strcpy (net->name, "usb%d");
	memcpy (net->dev_addr, node_id, sizeof node_id);

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
#if 0
// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
		     (net->dev_addr [0] & 0x02) == 0))
			strcpy (net->name, "eth%d");
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strcpy(net->name, "wlan%d");
		/* WWAN devices should always be named "wwan%d" */
		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
			strcpy(net->name, "wwan%d");

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;
	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wlan_type);
	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wwan_type);

	status = register_netdev (net);
	if (status)
		goto out4;
	netif_info(dev, probe, dev->net,
		   "register '%s' at usb-%s-%s, %s, %pM\n",
		   udev->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description,
		   net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	netif_device_attach (net);

	if (dev->driver_info->flags & FLAG_LINK_INTR)
		netif_carrier_off(net);

	return 0;

out4:
	usb_free_urb(dev->interrupt);
out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
			dev->suspend_count--;
			spin_unlock_irq(&dev->txq.lock);
			return -EBUSY;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		usbnet_terminate_urbs(dev);
		usb_kill_urb(dev->interrupt);

		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);

int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff		*skb;
	struct urb		*res;
	int			retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->interrupt, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* handle remote wakeup ASAP */
			if (!dev->wait &&
				netif_device_present(dev->net) &&
				!timer_pending(&dev->delay) &&
				!test_bit(EVENT_RX_HALT, &dev->flags))
					rx_alloc_submit(dev, GFP_NOIO);

			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_tx_wake_all_queues(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
		usb_autopm_get_interface_no_resume(intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);

/*
 * Either a subdriver implements manage_power, in which case it is assumed
 * to always be ready to be suspended, or it reports its readiness to be
 * suspended explicitly.
 */
void usbnet_device_suggests_idle(struct usbnet *dev)
{
	if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
		dev->intf->needs_remote_wakeup = 1;
		usb_autopm_put_interface_async(dev->intf);
	}
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);

/*-------------------------------------------------------------------------*/
static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			     u16 value, u16 index, void *data, u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (data) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf)
			goto out;
	}

	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_GET_TIMEOUT);
	if (err > 0 && err <= size)
		memcpy(data, buf, err);
	kfree(buf);
out:
	return err;
}

static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			      u16 value, u16 index, const void *data,
			      u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (data) {
		buf = kmemdup(data, size, GFP_KERNEL);
		if (!buf)
			goto out;
	}

	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_SET_TIMEOUT);
	kfree(buf);

out:
	return err;
}

/*
 * This function must not be called from a suspend/resume callback;
 * doing so would deadlock.
 */
int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		    u16 value, u16 index, void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd);

/*
 * This function must not be called from a suspend/resume callback;
 * doing so would deadlock.
 */
int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		     u16 value, u16 index, const void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				 data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd);

/*
 * This function may be called safely from a suspend/resume callback,
 * and generally should only be called there.
 */
int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			 u16 value, u16 index, void *data, u16 size)
{
	return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				 data, size);
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);

/*
 * This function may be called safely from a suspend/resume callback,
 * and generally should only be called there.
 */
int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			  u16 value, u16 index, const void *data,
			  u16 size)
{
	return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				  data, size);
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);

static void usbnet_async_cmd_cb(struct urb *urb)
{
	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
	int status = urb->status;

	if (status < 0)
		dev_dbg(&urb->dev->dev, "%s failed with %d",
			__func__, status);

	kfree(req);
	usb_free_urb(urb);
}

/*
 * The caller must make sure that the device can't be put into suspend
 * state until the control URB completes.
 */
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
			   u16 value, u16 index, const void *data, u16 size)
{
	struct usb_ctrlrequest *req = NULL;
	struct urb *urb;
	int err = -ENOMEM;
	void *buf = NULL;

	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(dev->net, "Error allocating URB in"
			   " %s!\n", __func__);
		goto fail;
	}

	if (data) {
		buf = kmemdup(data, size, GFP_ATOMIC);
		if (!buf) {
			netdev_err(dev->net, "Error allocating buffer"
				   " in %s!\n", __func__);
			goto fail_free;
		}
	}

	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
	if (!req) {
		netdev_err(dev->net, "Failed to allocate memory for %s\n",
			   __func__);
		goto fail_free_buf;
	}

	req->bRequestType = reqtype;
	req->bRequest = cmd;
	req->wValue = cpu_to_le16(value);
	req->wIndex = cpu_to_le16(index);
	req->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     (void *)req, buf, size,
			     usbnet_async_cmd_cb, req);
	urb->transfer_flags |= URB_FREE_BUFFER;

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		netdev_err(dev->net, "Error submitting the control"
			   " message: status=%d\n", err);
		goto fail_free;
	}
	return 0;

fail_free_buf:
	kfree(buf);
fail_free:
	kfree(req);
	usb_free_urb(urb);
fail:
	return err;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
/*-------------------------------------------------------------------------*/

static int __init usbnet_init(void)
{
	/* Compiler should optimize this out. */
	BUILD_BUG_ON(
		FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));

	eth_random_addr(node_id);
	return 0;
}
module_init(usbnet_init);

static void __exit usbnet_exit(void)
{
}
module_exit(usbnet_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB network driver framework");
MODULE_LICENSE("GPL");