// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB Network driver infrastructure
 * Copyright (C) 2000-2005 by David Brownell
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 */

/*
 * This is a generic "USB networking" framework that works with several
 * kinds of full and high speed networking devices: host-to-host cables,
 * smart usb peripherals, and actual Ethernet adapters.
 *
 * These devices usually differ in terms of control protocols (if they
 * even have one!) and sometimes they define new framing to wrap or batch
 * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
 * so interface (un)binding, endpoint I/O queues, fault handling, and other
 * issues can usefully be addressed by this framework.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>

/*-------------------------------------------------------------------------*/

/*
 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
 * Several dozen bytes of IPv4 data can fit in two such transactions.
 * One maximum size Ethernet packet takes twenty four of them.
 * For high speed, each frame comfortably fits almost 36 max size
 * Ethernet packets (so queues should be bigger).
 *
 * The goal is to let the USB host controller be busy for 5msec or
 * more before an irq is required, under load.  Jumbograms change
 * the equation.
 */
#define MAX_QUEUE_MEMORY	(60 * 1518)
#define RX_QLEN(dev)		((dev)->rx_qlen)
#define TX_QLEN(dev)		((dev)->tx_qlen)
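
/*
 * For instance, at high speed with the default rx_urb_size (hard_mtu,
 * i.e. 1500 + 14 = 1514 for Ethernet), MAX_QUEUE_MEMORY (60 * 1518 =
 * 91080 bytes) yields rx_qlen = tx_qlen = 91080 / 1514 = 60 URBs;
 * super speed links scale that budget by five.  See
 * usbnet_update_max_qlen() below.
 */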
// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES	(5*HZ)

/* throttle rx/tx briefly after some faults, so hub_wq might disconnect()
 * us (it polls at HZ/4 usually) before we report too many false errors.
 */
#define THROTTLE_JIFFIES	(HZ/8)

// between wakeups
#define UNLINK_TIMEOUT_MS	3

/*-------------------------------------------------------------------------*/

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");

/*-------------------------------------------------------------------------*/

static const char * const usbnet_event_names[] = {
	[EVENT_TX_HALT]		   = "EVENT_TX_HALT",
	[EVENT_RX_HALT]		   = "EVENT_RX_HALT",
	[EVENT_RX_MEMORY]	   = "EVENT_RX_MEMORY",
	[EVENT_STS_SPLIT]	   = "EVENT_STS_SPLIT",
	[EVENT_LINK_RESET]	   = "EVENT_LINK_RESET",
	[EVENT_RX_PAUSED]	   = "EVENT_RX_PAUSED",
	[EVENT_DEV_ASLEEP]	   = "EVENT_DEV_ASLEEP",
	[EVENT_DEV_OPEN]	   = "EVENT_DEV_OPEN",
	[EVENT_DEVICE_REPORT_IDLE] = "EVENT_DEVICE_REPORT_IDLE",
	[EVENT_NO_RUNTIME_PM]	   = "EVENT_NO_RUNTIME_PM",
	[EVENT_RX_KILL]		   = "EVENT_RX_KILL",
	[EVENT_LINK_CHANGE]	   = "EVENT_LINK_CHANGE",
	[EVENT_SET_RX_MODE]	   = "EVENT_SET_RX_MODE",
	[EVENT_NO_IP_ALIGN]	   = "EVENT_NO_IP_ALIGN",
};

/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;

			/* ignore endpoints which cannot transfer data */
			if (!usb_endpoint_maxp(&e->desc))
				continue;

			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				fallthrough;
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	if (alt->desc.bAlternateSetting != 0 ||
	    !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	dev->in = usb_rcvbulkpipe (dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe (dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);

int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
	u8		addr[ETH_ALEN];
	int		tmp = -1, ret;
	unsigned char	buf [13];

	ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
	if (ret == 12)
		tmp = hex2bin(addr, buf, 6);
	if (tmp < 0) {
		dev_dbg(&dev->udev->dev,
			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}
	eth_hw_addr_set(dev->net, addr);
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
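
/*
 * A minimal, purely hypothetical minidriver bind() might use the two
 * helpers above roughly like this (the name foo_bind and the string
 * descriptor index 3 are illustrative only; real drivers take the
 * index from their descriptors, e.g. CDC's iMACAddress):
 *
 *	static int foo_bind(struct usbnet *dev, struct usb_interface *intf)
 *	{
 *		int ret = usbnet_get_endpoints(dev, intf);
 *
 *		if (ret < 0)
 *			return ret;
 *		return usbnet_get_ethernet_addr(dev, 3);
 *	}
 */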

static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	/* success */
	case 0:
		dev->driver_info->status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}

static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
	char		*buf = NULL;
	unsigned	pipe = 0;
	unsigned	maxp;
	unsigned	period;

	if (!dev->driver_info->status)
		return 0;

	pipe = usb_rcvintpipe (dev->udev,
			dev->status->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);
	maxp = usb_maxpacket(dev->udev, pipe);

	/* avoid 1 msec chatter:  min 8 msec poll rate */
	period = max ((int) dev->status->desc.bInterval,
		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);

	buf = kmalloc (maxp, GFP_KERNEL);
	if (buf) {
		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
		if (!dev->interrupt) {
			kfree (buf);
			return -ENOMEM;
		} else {
			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
				buf, maxp, intr_complete, dev, period);
			dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
			dev_dbg(&intf->dev,
				"status ep%din, %d bytes period %d\n",
				usb_pipeendpoint(pipe), maxp, period);
		}
	}
	return 0;
}

/* Submit the interrupt URB if not previously submitted, increasing refcount */
int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
{
	int ret = 0;

	WARN_ON_ONCE(dev->interrupt == NULL);
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);

		if (++dev->interrupt_count == 1)
			ret = usb_submit_urb(dev->interrupt, mem_flags);

		dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n",
			dev->interrupt_count);
		mutex_unlock(&dev->interrupt_mutex);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_status_start);

/* For resume; submit interrupt URB if previously submitted */
static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
{
	int ret = 0;

	mutex_lock(&dev->interrupt_mutex);
	if (dev->interrupt_count) {
		ret = usb_submit_urb(dev->interrupt, mem_flags);
		dev_dbg(&dev->udev->dev,
			"submitted interrupt URB for resume\n");
	}
	mutex_unlock(&dev->interrupt_mutex);
	return ret;
}

/* Kill the interrupt URB if all submitters want it killed */
void usbnet_status_stop(struct usbnet *dev)
{
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);
		WARN_ON(dev->interrupt_count == 0);

		if (dev->interrupt_count && --dev->interrupt_count == 0)
			usb_kill_urb(dev->interrupt);

		dev_dbg(&dev->udev->dev,
			"decremented interrupt URB count to %d\n",
			dev->interrupt_count);
		mutex_unlock(&dev->interrupt_mutex);
	}
}
EXPORT_SYMBOL_GPL(usbnet_status_stop);
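
/*
 * The interrupt URB is shared by everything that wants link status
 * notifications, so usbnet_status_start()/usbnet_status_stop() calls
 * must pair up: the URB is submitted only when interrupt_count goes
 * 0 -> 1 and killed only when it drops back to 0.  The
 * __usbnet_status_{start,stop}_force() variants bypass the count for
 * suspend/resume.
 */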

/* For suspend; always kill interrupt URB */
static void __usbnet_status_stop_force(struct usbnet *dev)
{
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);
		usb_kill_urb(dev->interrupt);
		dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
		mutex_unlock(&dev->interrupt_mutex);
	}
}

/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
	unsigned long flags;
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	/* only update if unset to allow minidriver rx_fixup override */
	if (skb->protocol == 0)
		skb->protocol = eth_type_trans (skb, dev->net);

	flags = u64_stats_update_begin_irqsave(&stats64->syncp);
	u64_stats_inc(&stats64->rx_packets);
	u64_stats_add(&stats64->rx_bytes, skb->len);
	u64_stats_update_end_irqrestore(&stats64->syncp, flags);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);

/* must be called if hard_mtu or rx_urb_size changed */
void usbnet_update_max_qlen(struct usbnet *dev)
{
	enum usb_device_speed speed = dev->udev->speed;

	if (!dev->rx_urb_size || !dev->hard_mtu)
		goto insanity;
	switch (speed) {
	case USB_SPEED_HIGH:
		dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		/*
		 * Don't take the default 5ms qlen for super-speed HCs,
		 * to save memory; iperf tests show a 2.5ms qlen can
		 * work well.
		 */
		dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
		break;
	default:
insanity:
		dev->rx_qlen = dev->tx_qlen = 4;
	}
}
EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);


/*-------------------------------------------------------------------------
 *
 * Network Device Driver (peer link to "Host Device", from USB host)
 *
 *-------------------------------------------------------------------------*/

int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	WRITE_ONCE(net->mtu, new_mtu);

	dev->hard_mtu = net->mtu + net->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			usbnet_pause_rx(dev);
			usbnet_unlink_rx_urbs(dev);
			usbnet_resume_rx(dev);
		}
	}

	/* max qlen depends on hard_mtu and rx_urb_size */
	usbnet_update_max_qlen(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);
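
/*
 * Worked example for the -EDOM check above: with a 512-byte bulk
 * maxpacket (high speed), an MTU of 1522 gives ll_mtu = 1522 + 14 =
 * 1536 = 3 * 512, so every max-sized frame would end exactly on a
 * packet boundary and the device would have to send a zero-length
 * packet to terminate the transfer; such MTUs are simply rejected.
 */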

/* The caller must hold list->lock */
static void __usbnet_queue_skb(struct sk_buff_head *list,
			       struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *) newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}

/*-------------------------------------------------------------------------*/

/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
 * completion callbacks.  2.5 should have fixed those bugs...
 */

static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long		flags;
	enum skb_state		old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);

	/* defer_bh() is never called with list == &dev->done.
	 * spin_lock_nested() tells lockdep that it is OK to take
	 * dev->done.lock here with list->lock held.
	 */
	spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);

	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock(&dev->done.lock);
	spin_unlock_irqrestore(&list->lock, flags);
	return old_state;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	if (!schedule_work (&dev->kevent))
		netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
	else
		netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb);

static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	/* prevent rx skb allocation when error ratio is high */
	if (test_bit(EVENT_RX_KILL, &dev->flags)) {
		usb_free_urb(urb);
		return -ENOLINK;
	}

	if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
		skb = __netdev_alloc_skb(dev->net, size, flags);
	else
		skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    test_bit(EVENT_DEV_OPEN, &dev->flags) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}


/*-------------------------------------------------------------------------*/

static inline int rx_process(struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup &&
	    !dev->driver_info->rx_fixup (dev, skb)) {
		/* With RX_ASSEMBLE, rx_fixup() must update counters */
		if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
			dev->net->stats.rx_errors++;
		return -EPROTO;
	}
	// else network stack removes extra byte if we forced a short packet

	/* all data was already cloned from skb inside the driver */
	if (dev->driver_info->flags & FLAG_MULTI_PACKET)
		return -EALREADY;

	if (skb->len < ETH_HLEN) {
		dev->net->stats.rx_errors++;
		dev->net->stats.rx_length_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
		return -EPROTO;
	}

	usbnet_skb_return(dev, skb);
	return 0;
}

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		fallthrough;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* we get controller i/o faults during hub_wq disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a hub_wq delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* stop rx if packet error rate is high */
	if (++dev->pkt_cnt > 30) {
		dev->pkt_cnt = 0;
		dev->pkt_err = 0;
	} else {
		if (state == rx_cleanup)
			dev->pkt_err++;
		if (dev->pkt_err > 20)
			set_bit(EVENT_RX_KILL, &dev->flags);
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			usb_mark_last_busy(dev->udev);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}

/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
	set_bit(EVENT_RX_PAUSED, &dev->flags);

	netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
}
EXPORT_SYMBOL_GPL(usbnet_pause_rx);

void usbnet_resume_rx(struct usbnet *dev)
{
	struct sk_buff *skb;
	int num = 0;

	clear_bit(EVENT_RX_PAUSED, &dev->flags);

	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
		usbnet_skb_return(dev, skb);
		num++;
	}

	tasklet_schedule(&dev->bh);

	netif_dbg(dev, rx_status, dev->net,
		  "paused rx queue disabled, %d skbs requeued\n", num);
}
EXPORT_SYMBOL_GPL(usbnet_resume_rx);

void usbnet_purge_paused_rxq(struct usbnet *dev)
{
	skb_queue_purge(&dev->rxq_pause);
}
EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Take a reference on the URB so it can't be freed
		 * while usb_unlink_urb() runs; otherwise there is a
		 * use-after-free risk, since usb_unlink_urb() always
		 * races with the completion handler (including
		 * defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}

// Flush all pending rx urbs
// minidrivers may need to do this when the MTU changes

void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
	if (netif_running(dev->net)) {
		(void) unlink_urbs (dev, &dev->rxq);
		tasklet_schedule(&dev->bh);
	}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);

/*-------------------------------------------------------------------------*/

static void wait_skb_queue_empty(struct sk_buff_head *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		spin_unlock_irqrestore(&q->lock, flags);
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&dev->wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	temp = unlink_urbs(dev, &dev->txq) +
		unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	wait_skb_queue_empty(&dev->rxq);
	wait_skb_queue_empty(&dev->txq);
	wait_skb_queue_empty(&dev->done);
	netif_dbg(dev, ifdown, dev->net,
		  "waited for %d urb completions\n", temp);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dev->wait, &wait);
}

int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	const struct driver_info *info = dev->driver_info;
	int			retval, pm, mpn;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue (net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* to not race resume */
	pm = usb_autopm_get_interface(dev->intf);
	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0)
			netif_info(dev, ifdown, dev->net,
				   "stop fail (%d) usbnet usb-%s-%s, %s\n",
				   retval,
				   dev->udev->bus->bus_name, dev->udev->devpath,
				   info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
		usbnet_terminate_urbs(dev);

	usbnet_status_stop(dev);

	usbnet_purge_paused_rxq(dev);

	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);

	/* deferred work (timer, softirq, task) must also stop */
	dev->flags = 0;
	del_timer_sync (&dev->delay);
	tasklet_kill (&dev->bh);
	cancel_work_sync(&dev->kevent);
	if (!pm)
		usb_autopm_put_interface(dev->intf);

	if (info->manage_power && mpn)
		info->manage_power(dev, 0);
	else
		usb_autopm_put_interface(dev->intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_stop);
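
/*
 * PM reference balance across open/stop: usbnet_open() below takes an
 * autopm reference and, when the minidriver implements manage_power,
 * drops it again so the link can autosuspend while up.  If
 * manage_power() fails, EVENT_NO_RUNTIME_PM is set and the reference
 * is kept; usbnet_stop() checks that flag (as "mpn") to decide whether
 * to call manage_power(dev, 0) or drop the reference itself.
 */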

/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	const struct driver_info *info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	/* hard_mtu or rx_urb_size may change in reset() */
	usbnet_update_max_qlen(dev);

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_err(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usbnet_status_start(dev, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	/* reset rx error state */
	dev->pkt_cnt = 0;
	dev->pkt_err = 0;
	clear_bit(EVENT_RX_KILL, &dev->flags);

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0) {
			retval = 0;
			set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
	return retval;
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_open);

/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */

/* These methods are written on the assumption that the device
 * uses MII
 */
int usbnet_get_link_ksettings_mii(struct net_device *net,
				  struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_read)
		return -EOPNOTSUPP;

	mii_ethtool_get_link_ksettings(&dev->mii, cmd);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_mii);
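
/* Minidrivers without an MII PHY can instead use the _internal variant
 * below, which reports whatever speeds the device itself announced via
 * dev->rx_speed/tx_speed.
 */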
int usbnet_get_link_ksettings_internal(struct net_device *net,
				       struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	/* the assumption that speed is equal on tx and rx
	 * is deeply ingrained into the networking layer.
	 * For wireless stuff it is not true.
	 * We assume that rx_speed matters more.
	 */
	if (dev->rx_speed != SPEED_UNSET)
		cmd->base.speed = dev->rx_speed / 1000000;
	else if (dev->tx_speed != SPEED_UNSET)
		cmd->base.speed = dev->tx_speed / 1000000;
	else
		cmd->base.speed = SPEED_UNKNOWN;

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_internal);

int usbnet_set_link_ksettings_mii(struct net_device *net,
				  const struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);
	int retval;

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd);

	/* link speed/duplex might have changed */
	if (dev->driver_info->link_reset)
		dev->driver_info->link_reset(dev);

	/* hard_mtu or rx_urb_size may change in link_reset() */
	usbnet_update_max_qlen(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii);

u32 usbnet_get_link (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	/* If a check_connect is defined, return its result */
	if (dev->driver_info->check_connect)
		return dev->driver_info->check_connect (dev) == 0;

	/* if the device has mii operations, use those */
	if (dev->mii.mdio_read)
		return mii_link_ok(&dev->mii);

	/* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
	return ethtool_op_get_link(net);
}
EXPORT_SYMBOL_GPL(usbnet_get_link);

int usbnet_nway_reset(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	return mii_nway_restart(&dev->mii);
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);

void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
	struct usbnet *dev = netdev_priv(net);

	strscpy(info->driver, dev->driver_name, sizeof(info->driver));
	strscpy(info->fw_version, dev->driver_info->description,
		sizeof(info->fw_version));
	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);

u32 usbnet_get_msglevel (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return dev->msg_enable;
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);

void usbnet_set_msglevel (struct net_device *net, u32 level)
{
	struct usbnet *dev = netdev_priv(net);

	dev->msg_enable = level;
}
EXPORT_SYMBOL_GPL(usbnet_set_msglevel);

/* drivers may override default ethtool_ops in their bind() routine */
static const struct ethtool_ops usbnet_ethtool_ops = {
	.get_link		= usbnet_get_link,
	.nway_reset		= usbnet_nway_reset,
	.get_drvinfo		= usbnet_get_drvinfo,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= usbnet_get_link_ksettings_mii,
	.set_link_ksettings	= usbnet_set_link_ksettings_mii,
};

/*-------------------------------------------------------------------------*/

static void __handle_link_change(struct usbnet *dev)
{
	if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
		return;

	if (!netif_carrier_ok(dev->net)) {
		/* kill URBs for reading packets to save bus bandwidth */
		unlink_urbs(dev, &dev->rxq);

		/*
		 * tx_timeout will unlink URBs for sending packets;
		 * the tx queue is stopped by the net core after the
		 * link goes down
		 */
	} else {
		/* submitting URBs for reading packets */
		tasklet_schedule(&dev->bh);
	}

	/* hard_mtu or rx_urb_size may change during link change */
	usbnet_update_max_qlen(dev);

	clear_bit(EVENT_LINK_CHANGE, &dev->flags);
}

void usbnet_set_rx_mode(struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
}
EXPORT_SYMBOL_GPL(usbnet_set_rx_mode);

static void __handle_set_rx_mode(struct usbnet *dev)
{
	if (dev->driver_info->set_rx_mode)
		(dev->driver_info->set_rx_mode)(dev);

	clear_bit(EVENT_SET_RX_MODE, &dev->flags);
}

/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
usbnet_deferred_kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		const struct driver_info *info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}

		/* handle link change from link resetting */
		__handle_link_change(dev);
	}

	if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
		__handle_link_change(dev);

	if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
		__handle_set_rx_mode(dev);

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}

/*-------------------------------------------------------------------------*/

static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&stats64->syncp);
		u64_stats_add(&stats64->tx_packets, entry->packets);
		u64_stats_add(&stats64->tx_bytes, entry->length);
		u64_stats_update_end_irqrestore(&stats64->syncp, flags);
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		/* like rx, tx gets controller i/o faults during hub_wq
		 * delays and so it uses the same throttling mechanism.
		 */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			usb_mark_last_busy(dev->udev);
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				netif_dbg(dev, link, dev->net,
					  "tx throttle %d\n", urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);
	(void) defer_bh(dev, skb, &dev->txq, tx_done);
}

/*-------------------------------------------------------------------------*/

void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);
	/* this needs to be handled individually because the generic layer
	 * doesn't know what is sufficient and could not restore private
	 * information if a remedy of an unconditional reset were used.
	 */
	if (dev->driver_info->recover)
		(dev->driver_info->recover)(dev);
}
EXPORT_SYMBOL_GPL(usbnet_tx_timeout);

/*-------------------------------------------------------------------------*/

static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
{
	unsigned num_sgs, total_len = 0;
	int i, s = 0;

	num_sgs = skb_shinfo(skb)->nr_frags + 1;
	if (num_sgs == 1)
		return 0;

	/* reserve one for zero packet */
	urb->sg = kmalloc_array(num_sgs + 1, sizeof(struct scatterlist),
				GFP_ATOMIC);
	if (!urb->sg)
		return -ENOMEM;

	urb->num_sgs = num_sgs;
	sg_init_table(urb->sg, urb->num_sgs + 1);

	sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
	total_len += skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		total_len += skb_frag_size(f);
		sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
			    skb_frag_off(f));
	}
	urb->transfer_buffer_length = total_len;

	return 1;
}

netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
			       struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	unsigned int		length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	const struct driver_info *info = dev->driver_info;
	unsigned long		flags;
	int retval;

	if (skb)
		skb_tx_timestamp(skb);

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			/* packet collected; minidriver waiting for more */
			if (info->flags & FLAG_MULTI_PACKET)
				goto not_drop;
			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
			goto drop;
		}
	}

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);
	if (dev->can_dma_sg) {
		if (build_dma_sg(skb, urb) < 0)
			goto drop;
	}
	length = urb->transfer_buffer_length;

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				length++;
				if (skb_tailroom(skb) && !urb->num_sgs) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				} else if (urb->num_sgs)
					sg_set_buf(&urb->sg[urb->num_sgs++],
							dev->padding_pkt, 1);
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}
	urb->transfer_buffer_length = length;
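
	/* For example: a 1024-byte frame with a 512-byte bulk maxpacket
	 * divides evenly, so without FLAG_SEND_ZLP one pad byte is
	 * appended above (length becomes 1025) to force a short final
	 * packet instead of relying on the device to accept a ZLP.
	 */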

	if (info->flags & FLAG_MULTI_PACKET) {
		/* Driver has set number of packets and a length delta.
		 * Calculate the complete length and ensure that it's
		 * positive.
		 */
		entry->length += length;
		if (WARN_ON_ONCE(entry->length <= 0))
			entry->length = length;
	} else {
		usbnet_set_skb_tx_stats(skb, 1, length);
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}
	if (netif_queue_stopped(net)) {
		usb_autopm_put_interface_async(dev->intf);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers, the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		__usbnet_queue_skb(&dev->txq, skb, tx_start);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		if (urb) {
			kfree(urb->sg);
			usb_free_urb(urb);
		}
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %u, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);

static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
{
	struct urb	*urb;
	int		i;
	int		ret = 0;

	/* don't refill the queue all at once */
	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
		urb = usb_alloc_urb(0, flags);
		if (urb != NULL) {
			ret = rx_submit(dev, urb, flags);
			if (ret)
				goto err;
		} else {
			ret = -ENOMEM;
			goto err;
		}
	}
err:
	return ret;
}
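
/*
 * rx_alloc_submit() deliberately posts at most ten read URBs per call;
 * when the ring is still short of RX_QLEN(dev), usbnet_bh() below
 * reschedules itself to top it up on a later pass.
 */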

static inline void usb_free_skb(struct sk_buff *skb)
{
	struct skb_data *entry = (struct skb_data *)skb->cb;

	usb_free_urb(entry->urb);
	dev_kfree_skb(skb);
}

/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void usbnet_bh (struct timer_list *t)
{
	struct usbnet		*dev = from_timer(dev, t, delay);
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			if (rx_process(dev, skb))
				usb_free_skb(skb);
			continue;
		case tx_done:
			kfree(entry->urb->sg);
			fallthrough;
		case rx_cleanup:
			usb_free_skb(skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	/* restart RX again after disabling due to high error rate */
	clear_bit(EVENT_RX_KILL, &dev->flags);

	/* waiting for all pending urbs to complete?
	 * only then can we forgo submitting anew
	 */
	if (waitqueue_active(&dev->wait)) {
		if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
			wake_up_all(&dev->wait);

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   netif_carrier_ok(dev->net) &&
		   !timer_pending(&dev->delay) &&
		   !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
		   !test_bit(EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;

		if (temp < RX_QLEN(dev)) {
			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
				return;
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			if (dev->rxq.qlen < RX_QLEN(dev))
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}

static void usbnet_bh_tasklet(struct tasklet_struct *t)
{
	struct usbnet *dev = from_tasklet(dev, t, bh);

	usbnet_bh(&dev->delay);
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

void usbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;
	struct urb		*urb;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
		   intf->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		dev_kfree_skb(urb->context);
		kfree(urb->sg);
		usb_free_urb(urb);
	}

	if (dev->driver_info->unbind)
		dev->driver_info->unbind(dev, intf);

	usb_kill_urb(dev->interrupt);
	usb_free_urb(dev->interrupt);
	kfree(dev->padding_pkt);

	free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);

static const struct net_device_ops usbnet_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_set_rx_mode	= usbnet_set_rx_mode,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static const struct device_type wlan_type = {
	.name	= "wlan",
};

static const struct device_type wwan_type = {
	.name	= "wwan",
};

int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	const struct driver_info	*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	struct usb_driver		*driver = to_usb_driver(udev->dev.driver);

	/* usbnet already took usb runtime pm, so have to enable the feature
	 * for usb interface, otherwise usb_autopm_get_interface may return
	 * failure if RUNTIME_PM is enabled.
	 */
	if (!driver->supports_autosuspend) {
		driver->supports_autosuspend = 1;
		pm_runtime_enable(&udev->dev);
	}

	name = udev->dev.driver->name;
	info = (const struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		goto out;

	/* netdev_printk() needs this so do it as early as possible */
	SET_NETDEV_DEV(net, &udev->dev);

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->rx_speed = SPEED_UNSET;
	dev->tx_speed = SPEED_UNSET;

	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	init_waitqueue_head(&dev->wait);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	tasklet_setup(&dev->bh, usbnet_bh_tasklet);
	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
	init_usb_anchor(&dev->deferred);
	timer_setup(&dev->delay, usbnet_bh, 0);
	mutex_init (&dev->phy_mutex);
	mutex_init(&dev->interrupt_mutex);
	dev->interrupt_count = 0;

	dev->net = net;
	strscpy(net->name, "usb%d", sizeof(net->name));

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
	net->min_mtu = 0;
	net->max_mtu = ETH_MAX_MTU;
	net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
		     (net->dev_addr [0] & 0x02) == 0))
			strscpy(net->name, "eth%d", sizeof(net->name));
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strscpy(net->name, "wlan%d", sizeof(net->name));
		/* WWAN devices should always be named "wwan%d" */
		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
			strscpy(net->name, "wwan%d", sizeof(net->name));

		/* devices that cannot do ARP */
		if ((dev->driver_info->flags & FLAG_NOARP) != 0)
			net->flags |= IFF_NOARP;

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		u8	ep_addrs[3] = {
			info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
		};

		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;

		if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
			status = -EINVAL;
	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
	if (dev->maxpacket == 0) {
		/* that is a broken device */
		status = -ENODEV;
		goto out4;
	}

	/* this flags the device for user space */
	if (!is_valid_ether_addr(net->dev_addr))
		eth_hw_addr_random(net);

	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wlan_type);
	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wwan_type);

	/* initialize max rx_qlen and tx_qlen */
	usbnet_update_max_qlen(dev);

	if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
	    !(info->flags & FLAG_MULTI_PACKET)) {
		dev->padding_pkt = kzalloc(1, GFP_KERNEL);
		if (!dev->padding_pkt) {
			status = -ENOMEM;
			goto out4;
		}
	}

	status = register_netdev (net);
	if (status)
		goto out5;
	netif_info(dev, probe, dev->net,
		   "register '%s' at usb-%s-%s, %s, %pM\n",
		   udev->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description,
		   net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	netif_device_attach (net);

	if (dev->driver_info->flags & FLAG_LINK_INTR)
		usbnet_link_change(dev, 0, 0);

	return 0;

out5:
	kfree(dev->padding_pkt);
out4:
	usb_free_urb(dev->interrupt);
out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	/* subdrivers must undo all they did in bind() if they
	 * fail it, but we may fail later and a deferred kevent
	 * may trigger an error resubmitting itself and, worse,
	 * schedule a timer.  So we kill it all just in case.
	 */
	cancel_work_sync(&dev->kevent);
	del_timer_sync(&dev->delay);
	free_netdev(net);
out:
	return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
			dev->suspend_count--;
			spin_unlock_irq(&dev->txq.lock);
			return -EBUSY;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		usbnet_terminate_urbs(dev);
		__usbnet_status_stop_force(dev);

		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);

int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff		*skb;
	struct urb		*res;
	int			retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URB if it was previously submitted */
		__usbnet_status_start_force(dev, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				kfree(res->sg);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* handle remote wakeup ASAP
			 * we cannot race against stop
			 */
			if (netif_device_present(dev->net) &&
				!timer_pending(&dev->delay) &&
				!test_bit(EVENT_RX_HALT, &dev->flags))
					rx_alloc_submit(dev, GFP_NOIO);

			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_tx_wake_all_queues(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
		usb_autopm_get_interface_no_resume(intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);

/*
 * Either a subdriver implements manage_power, in which case it is
 * assumed to always be ready to be suspended, or it reports its
 * readiness to be suspended explicitly
 */
void usbnet_device_suggests_idle(struct usbnet *dev)
{
	if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
		dev->intf->needs_remote_wakeup = 1;
		usb_autopm_put_interface_async(dev->intf);
	}
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);

/*
 * For devices that can do without special commands
 */
int usbnet_manage_power(struct usbnet *dev, int on)
{
	dev->intf->needs_remote_wakeup = on;
	return 0;
}
EXPORT_SYMBOL(usbnet_manage_power);
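
/*
 * Note the pairing above: usbnet_device_suggests_idle() drops an autopm
 * reference when the device first reports idleness, and usbnet_resume()
 * re-takes it (usb_autopm_get_interface_no_resume) once the device has
 * been woken, clearing EVENT_DEVICE_REPORT_IDLE so the cycle can repeat.
 */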
void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
{
	/* if a reset is needed, defer carrier-on until the link has
	 * actually been reset
	 */
	if (link && !need_reset)
		netif_carrier_on(dev->net);
	else
		netif_carrier_off(dev->net);

	if (need_reset && link)
		usbnet_defer_kevent(dev, EVENT_LINK_RESET);
	else
		usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
}
EXPORT_SYMBOL(usbnet_link_change);

/*-------------------------------------------------------------------------*/

static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			     u16 value, u16 index, void *data, u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (size) {
		buf = kmalloc(size, GFP_NOIO);
		if (!buf)
			goto out;
	}

	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_GET_TIMEOUT);
	if (err > 0 && err <= size) {
		if (data)
			memcpy(data, buf, err);
		else
			netdev_dbg(dev->net,
				   "Huh? Data requested but thrown away.\n");
	}
	kfree(buf);
out:
	return err;
}

static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			      u16 value, u16 index, const void *data,
			      u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (data) {
		buf = kmemdup(data, size, GFP_NOIO);
		if (!buf)
			goto out;
	} else {
		if (size) {
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}
	}

	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_SET_TIMEOUT);
	kfree(buf);

out:
	return err;
}

/*
 * This function must not be called from a suspend/resume callback:
 * it takes a PM reference via usb_autopm_get_interface(), which
 * would deadlock there.
 */
int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		    u16 value, u16 index, void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd);

/*
 * This function must not be called from a suspend/resume callback:
 * it takes a PM reference via usb_autopm_get_interface(), which
 * would deadlock there.
 */
int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		     u16 value, u16 index, const void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				 data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd);

/*
 * This function is safe to call from a suspend/resume callback,
 * and generally should only be called from one.
 */
int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			 u16 value, u16 index, void *data, u16 size)
{
	return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				 data, size);
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);

/*
 * This function is safe to call from a suspend/resume callback,
 * and generally should only be called from one.
 */
int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			  u16 value, u16 index, const void *data,
			  u16 size)
{
	return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				  data, size);
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);

static void usbnet_async_cmd_cb(struct urb *urb)
{
	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
	int status = urb->status;

	if (status < 0)
		dev_dbg(&urb->dev->dev, "%s failed with %d\n",
			__func__, status);

	kfree(req);
	usb_free_urb(urb);
}

/*
 * The caller must make sure that the device can't be put into suspend
 * state until the control URB completes.
 */
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
			   u16 value, u16 index, const void *data, u16 size)
{
	struct usb_ctrlrequest *req;
	struct urb *urb;
	int err = -ENOMEM;
	void *buf = NULL;

	netdev_dbg(dev->net, "usbnet_write_cmd_async cmd=0x%02x reqtype=%02x value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto fail;

	if (data) {
		buf = kmemdup(data, size, GFP_ATOMIC);
		if (!buf) {
			netdev_err(dev->net, "Error allocating buffer in %s!\n",
				   __func__);
			goto fail_free_urb;
		}
	}

	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
	if (!req)
		goto fail_free_buf;

	req->bRequestType = reqtype;
	req->bRequest = cmd;
	req->wValue = cpu_to_le16(value);
	req->wIndex = cpu_to_le16(index);
	req->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     (void *)req, buf, size,
			     usbnet_async_cmd_cb, req);
	urb->transfer_flags |= URB_FREE_BUFFER;

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		netdev_err(dev->net, "Error submitting the control message: status=%d\n",
			   err);
		goto fail_free_all;
	}
	return 0;

fail_free_all:
	kfree(req);
fail_free_buf:
	kfree(buf);
	/*
	 * Clear transfer_flags to avoid a double free: URB_FREE_BUFFER
	 * is set only after the URB has been filled, and usb_free_urb()
	 * would otherwise free buf a second time.
	 */
	urb->transfer_flags = 0;
fail_free_urb:
	usb_free_urb(urb);
fail:
	return err;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
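/*
 * A sketch of typical synchronous usage; XYZ_CMD_READ_REG and
 * XYZ_CMD_WRITE_REG are hypothetical vendor requests.  Subdrivers
 * usually wrap usbnet_read_cmd()/usbnet_write_cmd() in small helpers:
 *
 *	static int xyz_read_reg(struct usbnet *dev, u16 reg, u8 *val)
 *	{
 *		int ret = usbnet_read_cmd(dev, XYZ_CMD_READ_REG,
 *					  USB_DIR_IN | USB_TYPE_VENDOR |
 *					  USB_RECIP_DEVICE,
 *					  0, reg, val, 1);
 *
 *		return ret < 0 ? ret : 0;
 *	}
 *
 *	static int xyz_write_reg(struct usbnet *dev, u16 reg, u8 val)
 *	{
 *		return usbnet_write_cmd(dev, XYZ_CMD_WRITE_REG,
 *					USB_DIR_OUT | USB_TYPE_VENDOR |
 *					USB_RECIP_DEVICE,
 *					val, reg, NULL, 0);
 *	}
 *
 * From within suspend/resume callbacks the same requests must go
 * through usbnet_read_cmd_nopm()/usbnet_write_cmd_nopm() instead.
 */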
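/*
 * A sketch of asynchronous usage, for contexts that cannot sleep, such
 * as a status/interrupt URB completion handler; XYZ_CMD_WRITE_REG and
 * XYZ_REG_CTRL are hypothetical.  Completion status is not reported
 * back, the URB and its buffer are freed by the completion callback,
 * and the caller must keep the device from suspending until the
 * control URB has completed:
 *
 *	static void xyz_kick_device(struct usbnet *dev)
 *	{
 *		usbnet_write_cmd_async(dev, XYZ_CMD_WRITE_REG,
 *				       USB_DIR_OUT | USB_TYPE_VENDOR |
 *				       USB_RECIP_DEVICE,
 *				       1, XYZ_REG_CTRL, NULL, 0);
 *	}
 */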
/*-------------------------------------------------------------------------*/

static int __init usbnet_init(void)
{
	/* Compiler should optimize this out. */
	BUILD_BUG_ON(
		sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));

	return 0;
}
module_init(usbnet_init);

static void __exit usbnet_exit(void)
{
}
module_exit(usbnet_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB network driver framework");
MODULE_LICENSE("GPL");