// SPDX-License-Identifier: GPL-2.0
/*
 * USB Raw Gadget driver.
 * See Documentation/usb/raw-gadget.rst for more details.
 *
 * Copyright (c) 2020 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/ch11.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include <uapi/linux/usb/raw_gadget.h>

#define DRIVER_DESC "USB Raw Gadget"
#define DRIVER_NAME "raw-gadget"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Andrey Konovalov");
MODULE_LICENSE("GPL");

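/*
 * A rough sketch of the userspace flow this driver expects, assuming the
 * UAPI structures from <uapi/linux/usb/raw_gadget.h>; see
 * Documentation/usb/raw-gadget.rst for the authoritative description:
 *
 *	fd = open("/dev/raw-gadget", O_RDWR);
 *	// Fill struct usb_raw_init with driver_name, device_name and speed.
 *	ioctl(fd, USB_RAW_IOCTL_INIT, &init);
 *	ioctl(fd, USB_RAW_IOCTL_RUN, 0);
 *	// Loop: fetch events and answer control requests on endpoint 0.
 *	ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, &event);
 *	ioctl(fd, USB_RAW_IOCTL_EP0_READ, &io);	// or USB_RAW_IOCTL_EP0_WRITE
 */
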
/*----------------------------------------------------------------------*/

static DEFINE_IDA(driver_id_numbers);
#define DRIVER_DRIVER_NAME_LENGTH_MAX 32
#define USB_RAW_IO_LENGTH_MAX KMALLOC_MAX_SIZE

#define RAW_EVENT_QUEUE_SIZE 16

struct raw_event_queue {
	/* See the comment in raw_event_queue_fetch() for locking details. */
	spinlock_t lock;
	struct semaphore sema;
	struct usb_raw_event *events[RAW_EVENT_QUEUE_SIZE];
	int size;
};

static void raw_event_queue_init(struct raw_event_queue *queue)
{
	spin_lock_init(&queue->lock);
	sema_init(&queue->sema, 0);
	queue->size = 0;
}

static int raw_event_queue_add(struct raw_event_queue *queue,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	unsigned long flags;
	struct usb_raw_event *event;

	spin_lock_irqsave(&queue->lock, flags);
	if (queue->size >= RAW_EVENT_QUEUE_SIZE) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event = kmalloc(sizeof(*event) + length, GFP_ATOMIC);
	if (!event) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event->type = type;
	event->length = length;
	if (event->length)
		memcpy(&event->data[0], data, length);
	queue->events[queue->size] = event;
	queue->size++;
	up(&queue->sema);
	spin_unlock_irqrestore(&queue->lock, flags);
	return 0;
}

static struct usb_raw_event *raw_event_queue_fetch(
				struct raw_event_queue *queue)
{
	int ret;
	unsigned long flags;
	struct usb_raw_event *event;

	/*
	 * This function can be called concurrently. We first check that
	 * there's at least one event queued by decrementing the semaphore,
	 * and then take the lock to protect queue struct fields.
	 */
	ret = down_interruptible(&queue->sema);
	if (ret)
		return ERR_PTR(ret);
	spin_lock_irqsave(&queue->lock, flags);
	/*
	 * queue->size must have the same value as queue->sema counter (before
	 * the down_interruptible() call above), so this check is a fail-safe.
	 */
	if (WARN_ON(!queue->size)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return ERR_PTR(-ENODEV);
	}
	event = queue->events[0];
	queue->size--;
	memmove(&queue->events[0], &queue->events[1],
			queue->size * sizeof(queue->events[0]));
	spin_unlock_irqrestore(&queue->lock, flags);
	return event;
}

static void raw_event_queue_destroy(struct raw_event_queue *queue)
{
	int i;

	for (i = 0; i < queue->size; i++)
		kfree(queue->events[i]);
	queue->size = 0;
}

/*----------------------------------------------------------------------*/

struct raw_dev;

enum ep_state {
	STATE_EP_DISABLED,
	STATE_EP_ENABLED,
};

struct raw_ep {
	struct raw_dev *dev;
	enum ep_state state;
	struct usb_ep *ep;
	u8 addr;
	struct usb_request *req;
	bool urb_queued;
	bool disabling;
	ssize_t status;
};

enum dev_state {
	STATE_DEV_INVALID = 0,
	STATE_DEV_OPENED,
	STATE_DEV_INITIALIZED,
	STATE_DEV_REGISTERING,
	STATE_DEV_RUNNING,
	STATE_DEV_CLOSED,
	STATE_DEV_FAILED
};

struct raw_dev {
	struct kref count;
	spinlock_t lock;

	const char *udc_name;
	struct usb_gadget_driver driver;

	/* Reference to misc device: */
	struct device *dev;

	/* Make driver names unique */
	int driver_id_number;

	/* Protected by lock: */
	enum dev_state state;
	bool gadget_registered;
	struct usb_gadget *gadget;
	struct usb_request *req;
	bool ep0_in_pending;
	bool ep0_out_pending;
	bool ep0_urb_queued;
	ssize_t ep0_status;
	struct raw_ep eps[USB_RAW_EPS_NUM_MAX];
	int eps_num;

	struct completion ep0_done;
	struct raw_event_queue queue;
};

static struct raw_dev *dev_new(void)
{
	struct raw_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	/* Matches kref_put() in raw_release(). */
	kref_init(&dev->count);
	spin_lock_init(&dev->lock);
	init_completion(&dev->ep0_done);
	raw_event_queue_init(&dev->queue);
	dev->driver_id_number = -1;
	return dev;
}

static void dev_free(struct kref *kref)
{
	struct raw_dev *dev = container_of(kref, struct raw_dev, count);
	int i;

	kfree(dev->udc_name);
	kfree(dev->driver.udc_name);
	kfree(dev->driver.driver.name);
	if (dev->driver_id_number >= 0)
		ida_free(&driver_id_numbers, dev->driver_id_number);
	if (dev->req) {
		if (dev->ep0_urb_queued)
			usb_ep_dequeue(dev->gadget->ep0, dev->req);
		usb_ep_free_request(dev->gadget->ep0, dev->req);
	}
	raw_event_queue_destroy(&dev->queue);
	for (i = 0; i < dev->eps_num; i++) {
		if (dev->eps[i].state == STATE_EP_DISABLED)
			continue;
		usb_ep_disable(dev->eps[i].ep);
		usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
		kfree(dev->eps[i].ep->desc);
		dev->eps[i].state = STATE_EP_DISABLED;
	}
	kfree(dev);
}

/*----------------------------------------------------------------------*/

static int raw_queue_event(struct raw_dev *dev,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	int ret = 0;
	unsigned long flags;

	ret = raw_event_queue_add(&dev->queue, type, length, data);
	if (ret < 0) {
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
	}
	return ret;
}

static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_dev *dev = req->context;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		dev->ep0_status = req->status;
	else
		dev->ep0_status = req->actual;
	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete(&dev->ep0_done);
}

static u8 get_ep_addr(const char *name)
{
	/* If the endpoint has fixed function (named as e.g. "ep12out-bulk"),
	 * parse the endpoint address from its name. We deliberately use
	 * deprecated simple_strtoul() function here, as the number isn't
	 * followed by '\0' nor '\n'.
	 */
	if (isdigit(name[2]))
		return simple_strtoul(&name[2], NULL, 10);
	/* Otherwise the endpoint is configurable (named as e.g. "ep-a"). */
	return USB_RAW_EP_ADDR_ANY;
}

static int gadget_bind(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	int ret = 0, i = 0;
	struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
	struct usb_request *req;
	struct usb_ep *ep;
	unsigned long flags;

	if (strcmp(gadget->name, dev->udc_name) != 0)
		return -ENODEV;

	set_gadget_data(gadget, dev);
	req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!req) {
		dev_err(&gadget->dev, "usb_ep_alloc_request failed\n");
		set_gadget_data(gadget, NULL);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dev->lock, flags);
	dev->req = req;
	dev->req->context = dev;
	dev->req->complete = gadget_ep0_complete;
	dev->gadget = gadget;
	gadget_for_each_ep(ep, dev->gadget) {
		dev->eps[i].ep = ep;
		dev->eps[i].addr = get_ep_addr(ep->name);
		dev->eps[i].state = STATE_EP_DISABLED;
		i++;
	}
	dev->eps_num = i;
	spin_unlock_irqrestore(&dev->lock, flags);

	dev_dbg(&gadget->dev, "gadget connected\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
	if (ret < 0) {
		dev_err(&gadget->dev, "failed to queue connect event\n");
		set_gadget_data(gadget, NULL);
		return ret;
	}

	/* Matches kref_put() in gadget_unbind(). */
	kref_get(&dev->count);
	return ret;
}

static void gadget_unbind(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);

	set_gadget_data(gadget, NULL);
	/* Matches kref_get() in gadget_bind(). */
	kref_put(&dev->count, dev_free);
}

static int gadget_setup(struct usb_gadget *gadget,
			const struct usb_ctrlrequest *ctrl)
{
	int ret = 0;
	struct raw_dev *dev = get_gadget_data(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_err(&gadget->dev, "ignoring, device is not running\n");
		ret = -ENODEV;
		goto out_unlock;
	}
	if (dev->ep0_in_pending || dev->ep0_out_pending) {
		dev_dbg(&gadget->dev, "stalling, request already pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength)
		dev->ep0_in_pending = true;
	else
		dev->ep0_out_pending = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue control event\n");
	goto out;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out:
	if (ret == 0 && ctrl->wLength == 0) {
		/*
		 * Return USB_GADGET_DELAYED_STATUS as a workaround to stop
		 * some UDC drivers (e.g. dwc3) from automatically proceeding
		 * with the status stage for 0-length transfers.
		 * Should be removed once all UDC drivers are fixed to always
		 * delay the status stage until a response is queued to EP0.
		 */
		return USB_GADGET_DELAYED_STATUS;
	}
	return ret;
}

static void gadget_disconnect(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);
	int ret;

	dev_dbg(&gadget->dev, "gadget disconnected\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_DISCONNECT, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue disconnect event\n");
}

static void gadget_suspend(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);
	int ret;

	dev_dbg(&gadget->dev, "gadget suspended\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_SUSPEND, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue suspend event\n");
}

static void gadget_resume(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);
	int ret;

	dev_dbg(&gadget->dev, "gadget resumed\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_RESUME, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue resume event\n");
}

static void gadget_reset(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);
	int ret;

	dev_dbg(&gadget->dev, "gadget reset\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_RESET, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue reset event\n");
}

/*----------------------------------------------------------------------*/

static struct miscdevice raw_misc_device;

static int raw_open(struct inode *inode, struct file *fd)
{
	struct raw_dev *dev;

	/* Nonblocking I/O is not supported yet. */
	if (fd->f_flags & O_NONBLOCK)
		return -EINVAL;

	dev = dev_new();
	if (!dev)
		return -ENOMEM;
	fd->private_data = dev;
	dev->state = STATE_DEV_OPENED;
	dev->dev = raw_misc_device.this_device;
	return 0;
}

static int raw_release(struct inode *inode, struct file *fd)
{
	int ret = 0;
	struct raw_dev *dev = fd->private_data;
	unsigned long flags;
	bool unregister = false;

	spin_lock_irqsave(&dev->lock, flags);
	dev->state = STATE_DEV_CLOSED;
	if (!dev->gadget) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_put;
	}
	if (dev->gadget_registered)
		unregister = true;
	dev->gadget_registered = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (unregister) {
		ret = usb_gadget_unregister_driver(&dev->driver);
		if (ret != 0)
			dev_err(dev->dev,
				"usb_gadget_unregister_driver() failed with %d\n",
				ret);
		/* Matches kref_get() in raw_ioctl_run(). */
		kref_put(&dev->count, dev_free);
	}

out_put:
	/* Matches dev_new() in raw_open(). */
	kref_put(&dev->count, dev_free);
	return ret;
}

/*----------------------------------------------------------------------*/

static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	int driver_id_number;
	struct usb_raw_init arg;
	char *udc_driver_name;
	char *udc_device_name;
	char *driver_driver_name;
	unsigned long flags;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	switch (arg.speed) {
	case USB_SPEED_UNKNOWN:
		arg.speed = USB_SPEED_HIGH;
		break;
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		return -EINVAL;
	}

	driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
	if (driver_id_number < 0)
		return driver_id_number;

	driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!driver_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_id_number;
	}
	snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
				DRIVER_NAME ".%d", driver_id_number);

	udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_driver_name;
	}
	ret = strscpy(udc_driver_name, &arg.driver_name[0],
				UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_driver_name;
	ret = 0;

	udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_device_name) {
		ret = -ENOMEM;
		goto out_free_udc_driver_name;
	}
	ret = strscpy(udc_device_name, &arg.device_name[0],
				UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_device_name;
	ret = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_OPENED) {
		dev_dbg(dev->dev, "fail, device is not opened\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->udc_name = udc_driver_name;

	dev->driver.function = DRIVER_DESC;
	dev->driver.max_speed = arg.speed;
	dev->driver.setup = gadget_setup;
	dev->driver.disconnect = gadget_disconnect;
	dev->driver.bind = gadget_bind;
	dev->driver.unbind = gadget_unbind;
	dev->driver.suspend = gadget_suspend;
	dev->driver.resume = gadget_resume;
	dev->driver.reset = gadget_reset;
	dev->driver.driver.name = driver_driver_name;
	dev->driver.udc_name = udc_device_name;
	dev->driver.match_existing_only = 1;
	dev->driver_id_number = driver_id_number;

	dev->state = STATE_DEV_INITIALIZED;
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out_free_udc_device_name:
	kfree(udc_device_name);
out_free_udc_driver_name:
	kfree(udc_driver_name);
out_free_driver_driver_name:
	kfree(driver_driver_name);
out_free_driver_id_number:
	ida_free(&driver_id_numbers, driver_id_number);
	return ret;
}

static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_INITIALIZED) {
		dev_dbg(dev->dev, "fail, device is not initialized\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->state = STATE_DEV_REGISTERING;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_gadget_register_driver(&dev->driver);

	spin_lock_irqsave(&dev->lock, flags);
	if (ret) {
		dev_err(dev->dev,
			"fail, usb_gadget_register_driver returned %d\n", ret);
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}
	dev->gadget_registered = true;
	dev->state = STATE_DEV_RUNNING;
	/* Matches kref_put() in raw_release(). */
	kref_get(&dev->count);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_event arg;
	unsigned long flags;
	struct usb_raw_event *event;
	uint32_t length;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EINVAL;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	event = raw_event_queue_fetch(&dev->queue);
	if (PTR_ERR(event) == -EINTR) {
		dev_dbg(&dev->gadget->dev, "event fetching interrupted\n");
		return -EINTR;
	}
	if (IS_ERR(event)) {
		dev_err(&dev->gadget->dev, "failed to fetch event\n");
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENODEV;
	}
	length = min(arg.length, event->length);
	if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
		kfree(event);
		return -EFAULT;
	}

	kfree(event);
	return 0;
}
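
/*
 * A fetch from userspace would look roughly like this (a sketch, with an
 * arbitrary data capacity): the event header is followed by a data area,
 * and .length is set to that area's size before the call:
 *
 *	struct {
 *		struct usb_raw_event event;
 *		char data[128];
 *	} resp = { .event = { .length = sizeof(resp.data) } };
 *	ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, &resp);
 */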

static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
				bool get_from_user)
{
	void *data;

	if (copy_from_user(io, ptr, sizeof(*io)))
		return ERR_PTR(-EFAULT);
	if (io->ep >= USB_RAW_EPS_NUM_MAX)
		return ERR_PTR(-EINVAL);
	if (!usb_raw_io_flags_valid(io->flags))
		return ERR_PTR(-EINVAL);
	if (io->length > USB_RAW_IO_LENGTH_MAX)
		return ERR_PTR(-EINVAL);
	if (get_from_user)
		data = memdup_user(ptr + sizeof(*io), io->length);
	else {
		data = kmalloc(io->length, GFP_KERNEL);
		if (!data)
			data = ERR_PTR(-ENOMEM);
	}
	return data;
}
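
/*
 * For the write ioctls, userspace places the payload right after the
 * struct usb_raw_ep_io header; a sketch (the size and ep index are
 * illustrative, the index comes from USB_RAW_IOCTL_EP_ENABLE):
 *
 *	struct {
 *		struct usb_raw_ep_io io;
 *		char data[64];
 *	} cmd = { .io = { .ep = ep_index, .length = sizeof(cmd.data) } };
 *	ioctl(fd, USB_RAW_IOCTL_EP_WRITE, &cmd);
 */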

static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((in && !dev->ep0_in_pending) ||
			(!in && !dev->ep0_out_pending)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (WARN_ON(in && dev->ep0_out_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}
	if (WARN_ON(!in && dev->ep0_in_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}

	dev->req->buf = data;
	dev->req->length = io->length;
	dev->req->zero = usb_raw_io_flags_zero(io->flags);
	dev->ep0_urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_queue_failed;
	}

	ret = wait_for_completion_interruptible(&dev->ep0_done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(dev->gadget->ep0, dev->req);
		wait_for_completion(&dev->ep0_done);
		spin_lock_irqsave(&dev->lock, flags);
		if (dev->ep0_status == -ECONNRESET)
			dev->ep0_status = -EINTR;
		goto out_interrupted;
	}

	spin_lock_irqsave(&dev->lock, flags);

out_interrupted:
	ret = dev->ep0_status;
out_queue_failed:
	dev->ep0_urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep0_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min_t(unsigned int, io.length, ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (!dev->ep0_in_pending && !dev->ep0_out_pending) {
		dev_dbg(&dev->gadget->dev, "fail, no request pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = usb_ep_set_halt(dev->gadget->ep0);
	if (ret < 0)
		dev_err(&dev->gadget->dev,
			"fail, usb_ep_set_halt returned %d\n", ret);

	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_endpoint_descriptor *desc;
	struct raw_ep *ep;
	bool ep_props_matched = false;

	desc = memdup_user((void __user *)value, sizeof(*desc));
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/*
	 * Endpoints with a maxpacket length of 0 can cause crashes in UDC
	 * drivers.
	 */
	if (usb_endpoint_maxp(desc) == 0) {
		dev_dbg(dev->dev, "fail, bad endpoint maxpacket\n");
		kfree(desc);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_free;
	}

	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		if (ep->addr != usb_endpoint_num(desc) &&
				ep->addr != USB_RAW_EP_ADDR_ANY)
			continue;
		if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
			continue;
		ep_props_matched = true;
		if (ep->state != STATE_EP_DISABLED)
			continue;
		ep->ep->desc = desc;
		ret = usb_ep_enable(ep->ep);
		if (ret < 0) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_enable returned %d\n", ret);
			goto out_free;
		}
		ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
		if (!ep->req) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_alloc_request failed\n");
			usb_ep_disable(ep->ep);
			ret = -ENOMEM;
			goto out_free;
		}
		ep->state = STATE_EP_ENABLED;
		ep->ep->driver_data = ep;
		ret = i;
		goto out_unlock;
	}

	if (!ep_props_matched) {
		dev_dbg(&dev->gadget->dev, "fail, bad endpoint descriptor\n");
		ret = -EINVAL;
	} else {
		dev_dbg(&dev->gadget->dev, "fail, no endpoints available\n");
		ret = -EBUSY;
	}

out_free:
	kfree(desc);
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
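
/*
 * On success, USB_RAW_IOCTL_EP_ENABLE returns the index of the claimed
 * endpoint in dev->eps. Userspace passes that index back in
 * usb_raw_ep_io.ep for USB_RAW_IOCTL_EP_READ/EP_WRITE and as the plain
 * argument of USB_RAW_IOCTL_EP_DISABLE and the halt/wedge ioctls.
 */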

static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i = value;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, disable already in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
				"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->eps[i].disabling = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	usb_ep_disable(dev->eps[i].ep);

	spin_lock_irqsave(&dev->lock, flags);
	usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
	kfree(dev->eps[i].ep->desc);
	dev->eps[i].state = STATE_EP_DISABLED;
	dev->eps[i].disabling = false;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
		unsigned long value, bool set, bool halt)
{
	int ret = 0, i = value;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, disable is in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
				"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
		dev_dbg(&dev->gadget->dev,
				"fail, can't halt/wedge ISO endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (set && halt) {
		ret = usb_ep_set_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_halt returned %d\n", ret);
	} else if (!set && halt) {
		ret = usb_ep_clear_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_clear_halt returned %d\n", ret);
	} else if (set && !halt) {
		ret = usb_ep_set_wedge(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_wedge returned %d\n", ret);
	}

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
	struct raw_dev *dev = r_ep->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		r_ep->status = req->status;
	else
		r_ep->status = req->actual;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete((struct completion *)req->context);
}

static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;
	struct raw_ep *ep;
	DECLARE_COMPLETION_ONSTACK(done);

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (io->ep >= dev->eps_num) {
		dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	ep = &dev->eps[io->ep];
	if (ep->state != STATE_EP_ENABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, endpoint is already being disabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (in != usb_endpoint_dir_in(ep->ep->desc)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	ep->dev = dev;
	ep->req->context = &done;
	ep->req->complete = gadget_ep_complete;
	ep->req->buf = data;
	ep->req->length = io->length;
	ep->req->zero = usb_raw_io_flags_zero(io->flags);
	ep->urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(ep->ep, ep->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_queue_failed;
	}

	ret = wait_for_completion_interruptible(&done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(ep->ep, ep->req);
		wait_for_completion(&done);
		spin_lock_irqsave(&dev->lock, flags);
		if (ep->status == -ECONNRESET)
			ep->status = -EINTR;
		goto out_interrupted;
	}

	spin_lock_irqsave(&dev->lock, flags);

out_interrupted:
	ret = ep->status;
out_queue_failed:
	ep->urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min_t(unsigned int, io.length, ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static int raw_ioctl_configure(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_set_state(dev->gadget, USB_STATE_CONFIGURED);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_vbus_draw(dev->gadget, 2 * value);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static void fill_ep_caps(struct usb_ep_caps *caps,
				struct usb_raw_ep_caps *raw_caps)
{
	raw_caps->type_control = caps->type_control;
	raw_caps->type_iso = caps->type_iso;
	raw_caps->type_bulk = caps->type_bulk;
	raw_caps->type_int = caps->type_int;
	raw_caps->dir_in = caps->dir_in;
	raw_caps->dir_out = caps->dir_out;
}

static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
{
	limits->maxpacket_limit = ep->maxpacket_limit;
	limits->max_streams = ep->max_streams;
}

static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_raw_eps_info *info;
	struct raw_ep *ep;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}

	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		strscpy(&info->eps[i].name[0], ep->ep->name,
				USB_RAW_EP_NAME_MAX);
		info->eps[i].addr = ep->addr;
		fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
		fill_ep_limits(ep->ep, &info->eps[i].limits);
	}
	ret = dev->eps_num;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (copy_to_user((void __user *)value, info, sizeof(*info)))
		ret = -EFAULT;

out_free:
	kfree(info);
out:
	return ret;
}

static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
{
	struct raw_dev *dev = fd->private_data;
	int ret = 0;

	if (!dev)
		return -EBUSY;

	switch (cmd) {
	case USB_RAW_IOCTL_INIT:
		ret = raw_ioctl_init(dev, value);
		break;
	case USB_RAW_IOCTL_RUN:
		ret = raw_ioctl_run(dev, value);
		break;
	case USB_RAW_IOCTL_EVENT_FETCH:
		ret = raw_ioctl_event_fetch(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_WRITE:
		ret = raw_ioctl_ep0_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_READ:
		ret = raw_ioctl_ep0_read(dev, value);
		break;
	case USB_RAW_IOCTL_EP_ENABLE:
		ret = raw_ioctl_ep_enable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_DISABLE:
		ret = raw_ioctl_ep_disable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_WRITE:
		ret = raw_ioctl_ep_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP_READ:
		ret = raw_ioctl_ep_read(dev, value);
		break;
	case USB_RAW_IOCTL_CONFIGURE:
		ret = raw_ioctl_configure(dev, value);
		break;
	case USB_RAW_IOCTL_VBUS_DRAW:
		ret = raw_ioctl_vbus_draw(dev, value);
		break;
	case USB_RAW_IOCTL_EPS_INFO:
		ret = raw_ioctl_eps_info(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_STALL:
		ret = raw_ioctl_ep0_stall(dev, value);
		break;
	case USB_RAW_IOCTL_EP_SET_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, true);
		break;
	case USB_RAW_IOCTL_EP_CLEAR_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, false, true);
		break;
	case USB_RAW_IOCTL_EP_SET_WEDGE:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, false);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*----------------------------------------------------------------------*/

static const struct file_operations raw_fops = {
	.open = raw_open,
	.unlocked_ioctl = raw_ioctl,
	.compat_ioctl = raw_ioctl,
	.release = raw_release,
};

static struct miscdevice raw_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DRIVER_NAME,
	.fops = &raw_fops,
};

module_misc_device(raw_misc_device);