/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>


/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);


/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192

/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/* MAX_TRANSFER is chosen so that the VM is not stressed by
   allocations > PAGE_SIZE and the number of packets in a page
   is an integer. 512 is the largest possible packet on EHCI. */
#define WRITES_IN_FLIGHT	8
/* arbitrarily chosen */

/* Structure to hold all of our device specific stuff */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with */
	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked */
	bool			ongoing_read;		/* a read is going on */
	bool			processed_urb;		/* indicates whether we've processed the urb */
	spinlock_t		err_lock;		/* lock for errors */
	struct kref		kref;
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	struct completion	bulk_in_completion;	/* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)

static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);

static void skel_delete(struct kref *kref)
{
	struct usb_skel *dev = to_skel_dev(kref);

	usb_free_urb(dev->bulk_in_urb);
	usb_put_dev(dev->udev);
	kfree(dev->bulk_in_buffer);
	kfree(dev);
}

static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor;
	int retval = 0;

	subminor = iminor(inode);

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		pr_err("%s - error, can't find device for minor %d\n",
			__func__, subminor);
		retval = -ENODEV;
		goto exit;
	}

	dev = usb_get_intfdata(interface);
	if (!dev) {
		retval = -ENODEV;
		goto exit;
	}

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* lock the device to allow correctly handling errors
	 * in resumption */
	mutex_lock(&dev->io_mutex);

	retval = usb_autopm_get_interface(interface);
	if (retval) {
		/* drop our reference and unlock on error */
		mutex_unlock(&dev->io_mutex);
		kref_put(&dev->kref, skel_delete);
		goto exit;
	}

	/* save our object in the file's private structure */
	file->private_data = dev;
	mutex_unlock(&dev->io_mutex);

exit:
	return retval;
}

static int skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* allow the device to be autosuspended */
	mutex_lock(&dev->io_mutex);
	if (dev->interface)
		usb_autopm_put_interface(dev->interface);
	mutex_unlock(&dev->io_mutex);

	/* decrement the count on our device */
	kref_put(&dev->kref, skel_delete);
	return 0;
}

static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev;
	int res;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
	dev->errors = 0;
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);

	return res;
}

static void skel_read_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	spin_lock(&dev->err_lock);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero read bulk status received: %d\n",
				__func__, urb->status);

		dev->errors = urb->status;
	} else {
		dev->bulk_in_filled = urb->actual_length;
	}
	dev->ongoing_read = 0;
	spin_unlock(&dev->err_lock);

	complete(&dev->bulk_in_completion);
}

static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;

	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			dev->udev,
			usb_rcvbulkpipe(dev->udev,
				dev->bulk_in_endpointAddr),
			dev->bulk_in_buffer,
			min(dev->bulk_in_size, count),
			skel_read_bulk_callback,
			dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);

	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting read urb, error %d\n",
			__func__, rv);
		dev->bulk_in_filled = 0;
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}

	return rv;
}

static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = file->private_data;

	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;

	if (!dev->interface) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_for_completion_interruptible(&dev->bulk_in_completion);
		if (rv < 0)
			goto exit;
		/*
		 * by waiting we also semi-processed the urb;
		 * we must finish now
		 */
		dev->bulk_in_copied = 0;
		dev->processed_urb = 1;
	}

	if (!dev->processed_urb) {
		/*
		 * the URB hasn't been processed
		 * do it now
		 */
		wait_for_completion(&dev->bulk_in_completion);
		dev->bulk_in_copied = 0;
		dev->processed_urb = 1;
	}

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* no data to deliver */
		dev->bulk_in_filled = 0;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */

	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */

		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;

		dev->bulk_in_copied += chunk;

		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else if (!(file->f_flags & O_NONBLOCK))
			goto retry;
		rv = -EAGAIN;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}

static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);

		spin_lock(&dev->err_lock);
		dev->errors = urb->status;
		spin_unlock(&dev->err_lock);
	}

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
	up(&dev->limit_sem);
}

static ssize_t skel_write(struct file *file, const char __user *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);

	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);

exit:
	return retval;
}

static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,
};

/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,
};

static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	size_t buffer_size;
	int i;
	int retval = -ENOMEM;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&interface->dev, "Out of memory\n");
		goto error;
	}
	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_completion(&dev->bulk_in_completion);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (!dev->bulk_in_endpointAddr &&
		    usb_endpoint_is_bulk_in(endpoint)) {
			/* we found a bulk in endpoint */
			buffer_size = usb_endpoint_maxp(endpoint);
			dev->bulk_in_size = buffer_size;
			dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
			dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
			if (!dev->bulk_in_buffer) {
				dev_err(&interface->dev,
					"Could not allocate bulk_in_buffer\n");
				goto error;
			}
			dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!dev->bulk_in_urb) {
				dev_err(&interface->dev,
					"Could not allocate bulk_in_urb\n");
				goto error;
			}
		}

		if (!dev->bulk_out_endpointAddr &&
		    usb_endpoint_is_bulk_out(endpoint)) {
			/* we found a bulk out endpoint */
			dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
		}
	}
	if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d\n",
		 interface->minor);
	return 0;

error:
	if (dev)
		/* this frees allocated memory */
		kref_put(&dev->kref, skel_delete);
	return retval;
}

static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;
	mutex_unlock(&dev->io_mutex);

	usb_kill_anchored_urbs(&dev->submitted);

	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);

	dev_info(&interface->dev, "USB Skeleton #%d now disconnected\n", minor);
}

static void skel_draw_down(struct usb_skel *dev)
{
	int time;

	time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
	if (!time)
		usb_kill_anchored_urbs(&dev->submitted);
	usb_kill_urb(dev->bulk_in_urb);
}

static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (!dev)
		return 0;
	skel_draw_down(dev);
	return 0;
}

static int skel_resume(struct usb_interface *intf)
{
	return 0;
}

static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	return 0;
}

static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);

	return 0;
}

static struct usb_driver skel_driver = {
	.name =			"skeleton",
	.probe =		skel_probe,
	.disconnect =		skel_disconnect,
	.suspend =		skel_suspend,
	.resume =		skel_resume,
	.pre_reset =		skel_pre_reset,
	.post_reset =		skel_post_reset,
	.id_table =		skel_table,
	.supports_autosuspend = 1,
};

module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");