1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * inode.c -- user mode filesystem api for usb gadget controllers
4 *
5 * Copyright (C) 2003-2004 David Brownell
6 * Copyright (C) 2003 Agilent Technologies
7 */
8
9
10 /* #define VERBOSE_DEBUG */
11
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/fs_context.h>
16 #include <linux/pagemap.h>
17 #include <linux/uts.h>
18 #include <linux/wait.h>
19 #include <linux/compiler.h>
20 #include <linux/uaccess.h>
21 #include <linux/sched.h>
22 #include <linux/slab.h>
23 #include <linux/poll.h>
24 #include <linux/kthread.h>
25 #include <linux/aio.h>
26 #include <linux/uio.h>
27 #include <linux/refcount.h>
28 #include <linux/delay.h>
29 #include <linux/device.h>
30 #include <linux/moduleparam.h>
31
32 #include <linux/usb/gadgetfs.h>
33 #include <linux/usb/gadget.h>
34 #include <linux/usb/composite.h> /* for USB_GADGET_DELAYED_STATUS */
35
36 /* Undef helpers from linux/usb/composite.h as gadgetfs redefines them */
37 #undef DBG
38 #undef ERROR
39 #undef INFO
40
41
42 /*
43 * The gadgetfs API maps each endpoint to a file descriptor so that you
44 * can use standard synchronous read/write calls for I/O. There's some
45 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
46 * drivers show how this works in practice. You can also use AIO to
47 * eliminate I/O gaps between requests, to help when streaming data.
48 *
49 * Key parts that must be USB-specific are protocols defining how the
50 * read/write operations relate to the hardware state machines. There
51 * are two types of files. One type is for the device, implementing ep0.
52 * The other type is for each IN or OUT endpoint. In both cases, the
53 * user mode driver must configure the hardware before using it.
54 *
55 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
56 * (by writing configuration and device descriptors). Afterwards it
57 * may serve as a source of device events, used to handle all control
58 * requests other than basic enumeration.
59 *
60 * - Then, after a SET_CONFIGURATION control request, ep_config() is
61 * called when each /dev/gadget/ep* file is configured (by writing
62 * endpoint descriptors). Afterwards these files are used to write()
63 * IN data or to read() OUT data. To halt the endpoint, a "wrong
64 * direction" request is issued (like reading an IN endpoint).
65 *
66 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
67 * not possible on all hardware. For example, precise fault handling with
68 * respect to data left in endpoint fifos after aborted operations; or
69 * selective clearing of endpoint halts, to implement SET_INTERFACE.
70 */
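/* A minimal sketch of the ep0 setup loop described above, as user mode
 * code might write it. Illustrative only: error handling is omitted, and
 * the "/dev/gadget/dummy_udc" path and the "desc" buffer (message tag plus
 * descriptors, see dev_config() below) are assumptions that depend on the
 * controller and on the device being built.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	int run_ep0(const char *path, const void *desc, size_t desc_len)
 *	{
 *		struct usb_gadgetfs_event event;
 *		int fd = open(path, O_RDWR);	// e.g. "/dev/gadget/dummy_udc"
 *
 *		if (fd < 0 || write(fd, desc, desc_len) < 0)
 *			return -1;
 *		for (;;) {
 *			// blocks until the next device event arrives
 *			if (read(fd, &event, sizeof event) != sizeof event)
 *				break;
 *			switch (event.type) {
 *			case GADGETFS_SETUP:	// answer via read()/write() on fd
 *			case GADGETFS_CONNECT:
 *			case GADGETFS_DISCONNECT:
 *			case GADGETFS_SUSPEND:
 *			default:
 *				break;
 *			}
 *		}
 *		return close(fd);
 *	}
 */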
71
72 #define DRIVER_DESC "USB Gadget filesystem"
73 #define DRIVER_VERSION "24 Aug 2004"
74
75 static const char driver_desc [] = DRIVER_DESC;
76 static const char shortname [] = "gadgetfs";
77
78 MODULE_DESCRIPTION (DRIVER_DESC);
79 MODULE_AUTHOR ("David Brownell");
80 MODULE_LICENSE ("GPL");
81
82 static int ep_open(struct inode *, struct file *);
83
84
85 /*----------------------------------------------------------------------*/
86
87 #define GADGETFS_MAGIC 0xaee71ee7
88
89 /* /dev/gadget/$CHIP represents ep0 and the whole device */
90 enum ep0_state {
91 /* DISABLED is the initial state. */
92 STATE_DEV_DISABLED = 0,
93
94 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
95 * ep0/device i/o modes and binding to the controller. Driver
96 * must always write descriptors to initialize the device, then
97 * the device becomes UNCONNECTED until enumeration.
98 */
99 STATE_DEV_OPENED,
100
101 /* From then on, ep0 fd is in either of two basic modes:
102 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
103 * - SETUP: read/write will transfer control data and succeed;
104 * or if "wrong direction", performs protocol stall
105 */
106 STATE_DEV_UNCONNECTED,
107 STATE_DEV_CONNECTED,
108 STATE_DEV_SETUP,
109
110 /* UNBOUND means the driver closed ep0, so the device won't be
111 * accessible again (DEV_DISABLED) until all fds are closed.
112 */
113 STATE_DEV_UNBOUND,
114 };
115
116 /* enough for the whole queue: most events invalidate others */
117 #define N_EVENT 5
118
119 #define RBUF_SIZE 256
120
121 struct dev_data {
122 spinlock_t lock;
123 refcount_t count;
124 int udc_usage;
125 enum ep0_state state; /* P: lock */
126 struct usb_gadgetfs_event event [N_EVENT];
127 unsigned ev_next;
128 struct fasync_struct *fasync;
129 u8 current_config;
130
131 /* drivers reading ep0 MUST handle control requests (SETUP)
132 * reported that way; else the host will time out.
133 */
134 unsigned usermode_setup : 1,
135 setup_in : 1,
136 setup_can_stall : 1,
137 setup_out_ready : 1,
138 setup_out_error : 1,
139 setup_abort : 1,
140 gadget_registered : 1;
141 unsigned setup_wLength;
142
143 /* the rest is basically write-once */
144 struct usb_config_descriptor *config, *hs_config;
145 struct usb_device_descriptor *dev;
146 struct usb_request *req;
147 struct usb_gadget *gadget;
148 struct list_head epfiles;
149 void *buf;
150 wait_queue_head_t wait;
151 struct super_block *sb;
152 struct dentry *dentry;
153
154 /* except this scratch i/o buffer for ep0 */
155 u8 rbuf[RBUF_SIZE];
156 };
157
158 static inline void get_dev (struct dev_data *data)
159 {
160 refcount_inc (&data->count);
161 }
162
163 static void put_dev (struct dev_data *data)
164 {
165 if (likely (!refcount_dec_and_test (&data->count)))
166 return;
167 /* needs no more cleanup */
168 BUG_ON (waitqueue_active (&data->wait));
169 kfree (data);
170 }
171
172 static struct dev_data *dev_new (void)
173 {
174 struct dev_data *dev;
175
176 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
177 if (!dev)
178 return NULL;
179 dev->state = STATE_DEV_DISABLED;
180 refcount_set (&dev->count, 1);
181 spin_lock_init (&dev->lock);
182 INIT_LIST_HEAD (&dev->epfiles);
183 init_waitqueue_head (&dev->wait);
184 return dev;
185 }
186
187 /*----------------------------------------------------------------------*/
188
189 /* other /dev/gadget/$ENDPOINT files represent endpoints */
190 enum ep_state {
191 STATE_EP_DISABLED = 0,
192 STATE_EP_READY,
193 STATE_EP_ENABLED,
194 STATE_EP_UNBOUND,
195 };
196
197 struct ep_data {
198 struct mutex lock;
199 enum ep_state state;
200 refcount_t count;
201 struct dev_data *dev;
202 /* must hold dev->lock before accessing ep or req */
203 struct usb_ep *ep;
204 struct usb_request *req;
205 ssize_t status;
206 char name [16];
207 struct usb_endpoint_descriptor desc, hs_desc;
208 struct list_head epfiles;
209 wait_queue_head_t wait;
210 struct dentry *dentry;
211 };
212
213 static inline void get_ep (struct ep_data *data)
214 {
215 refcount_inc (&data->count);
216 }
217
218 static void put_ep (struct ep_data *data)
219 {
220 if (likely (!refcount_dec_and_test (&data->count)))
221 return;
222 put_dev (data->dev);
223 /* needs no more cleanup */
224 BUG_ON (!list_empty (&data->epfiles));
225 BUG_ON (waitqueue_active (&data->wait));
226 kfree (data);
227 }
228
229 /*----------------------------------------------------------------------*/
230
231 /* most "how to use the hardware" policy choices are in userspace:
232 * mapping endpoint roles (which the driver needs) to the capabilities
233 * which the usb controller has. most of those capabilities are exposed
234 * implicitly, starting with the driver name and then endpoint names.
235 */
236
237 static const char *CHIP;
238 static DEFINE_MUTEX(sb_mutex); /* Serialize superblock operations */
239
240 /*----------------------------------------------------------------------*/
241
242 /* NOTE: don't use dev_printk calls before binding to the gadget
243 * at the end of ep0 configuration, or after unbind.
244 */
245
246 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
247 #define xprintk(d,level,fmt,args...) \
248 printk(level "%s: " fmt , shortname , ## args)
249
250 #ifdef DEBUG
251 #define DBG(dev,fmt,args...) \
252 xprintk(dev , KERN_DEBUG , fmt , ## args)
253 #else
254 #define DBG(dev,fmt,args...) \
255 do { } while (0)
256 #endif /* DEBUG */
257
258 #ifdef VERBOSE_DEBUG
259 #define VDEBUG DBG
260 #else
261 #define VDEBUG(dev,fmt,args...) \
262 do { } while (0)
263 #endif /* DEBUG */
264
265 #define ERROR(dev,fmt,args...) \
266 xprintk(dev , KERN_ERR , fmt , ## args)
267 #define INFO(dev,fmt,args...) \
268 xprintk(dev , KERN_INFO , fmt , ## args)
269
270
271 /*----------------------------------------------------------------------*/
272
273 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
274 *
275 * After opening, configure non-control endpoints. Then use normal
276 * stream read() and write() requests; and maybe ioctl() to get more
277 * precise FIFO status when recovering from cancellation.
278 */
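/* For instance, once an endpoint file is configured, user mode code might
 * stream IN data and check the hardware FIFO like this. A hedged sketch:
 * "/dev/gadget/ep1in" is an assumed endpoint name (it varies by UDC), and
 * the descriptor write that ep_config() expects is not shown.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	int fd = open("/dev/gadget/ep1in", O_RDWR);
 *	// ... write tag 1 plus endpoint descriptor(s) first ...
 *	ssize_t n = write(fd, buf, len);	// blocks until the transfer completes
 *	int queued = ioctl(fd, GADGETFS_FIFO_STATUS);	// bytes still in the FIFO
 */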
279
280 static void epio_complete (struct usb_ep *ep, struct usb_request *req)
281 {
282 struct ep_data *epdata = ep->driver_data;
283
284 if (!req->context)
285 return;
286 if (req->status)
287 epdata->status = req->status;
288 else
289 epdata->status = req->actual;
290 complete ((struct completion *)req->context);
291 }
292
293 /* tasklock endpoint, returning when it's connected.
294 * still need dev->lock to use epdata->ep.
295 */
296 static int
297 get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
298 {
299 int val;
300
301 if (f_flags & O_NONBLOCK) {
302 if (!mutex_trylock(&epdata->lock))
303 goto nonblock;
304 if (epdata->state != STATE_EP_ENABLED &&
305 (!is_write || epdata->state != STATE_EP_READY)) {
306 mutex_unlock(&epdata->lock);
307 nonblock:
308 val = -EAGAIN;
309 } else
310 val = 0;
311 return val;
312 }
313
314 val = mutex_lock_interruptible(&epdata->lock);
315 if (val < 0)
316 return val;
317
318 switch (epdata->state) {
319 case STATE_EP_ENABLED:
320 return 0;
321 case STATE_EP_READY: /* not configured yet */
322 if (is_write)
323 return 0;
324 fallthrough;
325 case STATE_EP_UNBOUND: /* clean disconnect */
326 break;
327 // case STATE_EP_DISABLED: /* "can't happen" */
328 default: /* error! */
329 pr_debug ("%s: ep %p not available, state %d\n",
330 shortname, epdata, epdata->state);
331 }
332 mutex_unlock(&epdata->lock);
333 return -ENODEV;
334 }
335
336 static ssize_t
337 ep_io (struct ep_data *epdata, void *buf, unsigned len)
338 {
339 DECLARE_COMPLETION_ONSTACK (done);
340 int value;
341
342 spin_lock_irq (&epdata->dev->lock);
343 if (likely (epdata->ep != NULL)) {
344 struct usb_request *req = epdata->req;
345
346 req->context = &done;
347 req->complete = epio_complete;
348 req->buf = buf;
349 req->length = len;
350 value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
351 } else
352 value = -ENODEV;
353 spin_unlock_irq (&epdata->dev->lock);
354
355 if (likely (value == 0)) {
356 value = wait_for_completion_interruptible(&done);
357 if (value != 0) {
358 spin_lock_irq (&epdata->dev->lock);
359 if (likely (epdata->ep != NULL)) {
360 DBG (epdata->dev, "%s i/o interrupted\n",
361 epdata->name);
362 usb_ep_dequeue (epdata->ep, epdata->req);
363 spin_unlock_irq (&epdata->dev->lock);
364
365 wait_for_completion(&done);
366 if (epdata->status == -ECONNRESET)
367 epdata->status = -EINTR;
368 } else {
369 spin_unlock_irq (&epdata->dev->lock);
370
371 DBG (epdata->dev, "endpoint gone\n");
372 wait_for_completion(&done);
373 epdata->status = -ENODEV;
374 }
375 }
376 return epdata->status;
377 }
378 return value;
379 }
380
381 static int
382 ep_release (struct inode *inode, struct file *fd)
383 {
384 struct ep_data *data = fd->private_data;
385 int value;
386
387 value = mutex_lock_interruptible(&data->lock);
388 if (value < 0)
389 return value;
390
391 /* clean up if this can be reopened */
392 if (data->state != STATE_EP_UNBOUND) {
393 data->state = STATE_EP_DISABLED;
394 data->desc.bDescriptorType = 0;
395 data->hs_desc.bDescriptorType = 0;
396 usb_ep_disable(data->ep);
397 }
398 mutex_unlock(&data->lock);
399 put_ep (data);
400 return 0;
401 }
402
403 static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
404 {
405 struct ep_data *data = fd->private_data;
406 int status;
407
408 if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
409 return status;
410
411 spin_lock_irq (&data->dev->lock);
412 if (likely (data->ep != NULL)) {
413 switch (code) {
414 case GADGETFS_FIFO_STATUS:
415 status = usb_ep_fifo_status (data->ep);
416 break;
417 case GADGETFS_FIFO_FLUSH:
418 usb_ep_fifo_flush (data->ep);
419 break;
420 case GADGETFS_CLEAR_HALT:
421 status = usb_ep_clear_halt (data->ep);
422 break;
423 default:
424 status = -ENOTTY;
425 }
426 } else
427 status = -ENODEV;
428 spin_unlock_irq (&data->dev->lock);
429 mutex_unlock(&data->lock);
430 return status;
431 }
432
433 /*----------------------------------------------------------------------*/
434
435 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
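/* For example, user mode code might keep OUT reads in flight with Linux
 * native AIO, using the libaio wrappers. A sketch only: the queue depth
 * and buffer are arbitrary choices, and "ep_out_fd" is an already
 * configured endpoint file descriptor.
 *
 *	#include <libaio.h>
 *
 *	io_context_t ctx = 0;
 *	struct iocb iocb, *list[1] = { &iocb };
 *	struct io_event done;
 *
 *	io_setup(4, &ctx);
 *	io_prep_pread(&iocb, ep_out_fd, buf, sizeof buf, 0);
 *	io_submit(ctx, 1, list);		// queue the OUT transfer
 *	io_getevents(ctx, 1, 1, &done, NULL);	// done.res = bytes transferred
 */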
436
437 struct kiocb_priv {
438 struct usb_request *req;
439 struct ep_data *epdata;
440 struct kiocb *iocb;
441 struct mm_struct *mm;
442 struct work_struct work;
443 void *buf;
444 struct iov_iter to;
445 const void *to_free;
446 unsigned actual;
447 };
448
449 static int ep_aio_cancel(struct kiocb *iocb)
450 {
451 struct kiocb_priv *priv = iocb->private;
452 struct ep_data *epdata;
453 int value;
454
455 local_irq_disable();
456 epdata = priv->epdata;
457 // spin_lock(&epdata->dev->lock);
458 if (likely(epdata && epdata->ep && priv->req))
459 value = usb_ep_dequeue (epdata->ep, priv->req);
460 else
461 value = -EINVAL;
462 // spin_unlock(&epdata->dev->lock);
463 local_irq_enable();
464
465 return value;
466 }
467
468 static void ep_user_copy_worker(struct work_struct *work)
469 {
470 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
471 struct mm_struct *mm = priv->mm;
472 struct kiocb *iocb = priv->iocb;
473 size_t ret;
474
475 kthread_use_mm(mm);
476 ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
477 kthread_unuse_mm(mm);
478 if (!ret)
479 ret = -EFAULT;
480
481 /* completing the iocb can drop the ctx and mm, don't touch mm after */
482 iocb->ki_complete(iocb, ret);
483
484 kfree(priv->buf);
485 kfree(priv->to_free);
486 kfree(priv);
487 }
488
489 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
490 {
491 struct kiocb *iocb = req->context;
492 struct kiocb_priv *priv = iocb->private;
493 struct ep_data *epdata = priv->epdata;
494
495 /* lock against disconnect (and ideally, cancel) */
496 spin_lock(&epdata->dev->lock);
497 priv->req = NULL;
498 priv->epdata = NULL;
499
500 /* if this was a write or a read returning no data then we
501 * don't need to copy anything to userspace, so we can
502 * complete the aio request immediately.
503 */
504 if (priv->to_free == NULL || unlikely(req->actual == 0)) {
505 kfree(req->buf);
506 kfree(priv->to_free);
507 kfree(priv);
508 iocb->private = NULL;
509 iocb->ki_complete(iocb,
510 req->actual ? req->actual : (long)req->status);
511 } else {
512 /* ep_copy_to_user() won't report both; we hide some faults */
513 if (unlikely(0 != req->status))
514 DBG(epdata->dev, "%s fault %d len %d\n",
515 ep->name, req->status, req->actual);
516
517 priv->buf = req->buf;
518 priv->actual = req->actual;
519 INIT_WORK(&priv->work, ep_user_copy_worker);
520 schedule_work(&priv->work);
521 }
522
523 usb_ep_free_request(ep, req);
524 spin_unlock(&epdata->dev->lock);
525 put_ep(epdata);
526 }
527
528 static ssize_t ep_aio(struct kiocb *iocb,
529 struct kiocb_priv *priv,
530 struct ep_data *epdata,
531 char *buf,
532 size_t len)
533 {
534 struct usb_request *req;
535 ssize_t value;
536
537 iocb->private = priv;
538 priv->iocb = iocb;
539
540 kiocb_set_cancel_fn(iocb, ep_aio_cancel);
541 get_ep(epdata);
542 priv->epdata = epdata;
543 priv->actual = 0;
544 priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
545
546 /* each kiocb is coupled to one usb_request, but we can't
547 * allocate or submit those if the host disconnected.
548 */
549 spin_lock_irq(&epdata->dev->lock);
550 value = -ENODEV;
551 if (unlikely(epdata->ep == NULL))
552 goto fail;
553
554 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
555 value = -ENOMEM;
556 if (unlikely(!req))
557 goto fail;
558
559 priv->req = req;
560 req->buf = buf;
561 req->length = len;
562 req->complete = ep_aio_complete;
563 req->context = iocb;
564 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
565 if (unlikely(0 != value)) {
566 usb_ep_free_request(epdata->ep, req);
567 goto fail;
568 }
569 spin_unlock_irq(&epdata->dev->lock);
570 return -EIOCBQUEUED;
571
572 fail:
573 spin_unlock_irq(&epdata->dev->lock);
574 kfree(priv->to_free);
575 kfree(priv);
576 put_ep(epdata);
577 return value;
578 }
579
580 static ssize_t
581 ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
582 {
583 struct file *file = iocb->ki_filp;
584 struct ep_data *epdata = file->private_data;
585 size_t len = iov_iter_count(to);
586 ssize_t value;
587 char *buf;
588
589 if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
590 return value;
591
592 /* halt any endpoint by doing a "wrong direction" i/o call */
593 if (usb_endpoint_dir_in(&epdata->desc)) {
594 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
595 !is_sync_kiocb(iocb)) {
596 mutex_unlock(&epdata->lock);
597 return -EINVAL;
598 }
599 DBG (epdata->dev, "%s halt\n", epdata->name);
600 spin_lock_irq(&epdata->dev->lock);
601 if (likely(epdata->ep != NULL))
602 usb_ep_set_halt(epdata->ep);
603 spin_unlock_irq(&epdata->dev->lock);
604 mutex_unlock(&epdata->lock);
605 return -EBADMSG;
606 }
607
608 buf = kmalloc(len, GFP_KERNEL);
609 if (unlikely(!buf)) {
610 mutex_unlock(&epdata->lock);
611 return -ENOMEM;
612 }
613 if (is_sync_kiocb(iocb)) {
614 value = ep_io(epdata, buf, len);
615 if (value >= 0 && (copy_to_iter(buf, value, to) != value))
616 value = -EFAULT;
617 } else {
618 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
619 value = -ENOMEM;
620 if (!priv)
621 goto fail;
622 priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
623 if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
624 kfree(priv);
625 goto fail;
626 }
627 value = ep_aio(iocb, priv, epdata, buf, len);
628 if (value == -EIOCBQUEUED)
629 buf = NULL;
630 }
631 fail:
632 kfree(buf);
633 mutex_unlock(&epdata->lock);
634 return value;
635 }
636
637 static ssize_t ep_config(struct ep_data *, const char *, size_t);
638
639 static ssize_t
640 ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
641 {
642 struct file *file = iocb->ki_filp;
643 struct ep_data *epdata = file->private_data;
644 size_t len = iov_iter_count(from);
645 bool configured;
646 ssize_t value;
647 char *buf;
648
649 if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
650 return value;
651
652 configured = epdata->state == STATE_EP_ENABLED;
653
654 /* halt any endpoint by doing a "wrong direction" i/o call */
655 if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
656 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
657 !is_sync_kiocb(iocb)) {
658 mutex_unlock(&epdata->lock);
659 return -EINVAL;
660 }
661 DBG (epdata->dev, "%s halt\n", epdata->name);
662 spin_lock_irq(&epdata->dev->lock);
663 if (likely(epdata->ep != NULL))
664 usb_ep_set_halt(epdata->ep);
665 spin_unlock_irq(&epdata->dev->lock);
666 mutex_unlock(&epdata->lock);
667 return -EBADMSG;
668 }
669
670 buf = kmalloc(len, GFP_KERNEL);
671 if (unlikely(!buf)) {
672 mutex_unlock(&epdata->lock);
673 return -ENOMEM;
674 }
675
676 if (unlikely(!copy_from_iter_full(buf, len, from))) {
677 value = -EFAULT;
678 goto out;
679 }
680
681 if (unlikely(!configured)) {
682 value = ep_config(epdata, buf, len);
683 } else if (is_sync_kiocb(iocb)) {
684 value = ep_io(epdata, buf, len);
685 } else {
686 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
687 value = -ENOMEM;
688 if (priv) {
689 value = ep_aio(iocb, priv, epdata, buf, len);
690 if (value == -EIOCBQUEUED)
691 buf = NULL;
692 }
693 }
694 out:
695 kfree(buf);
696 mutex_unlock(&epdata->lock);
697 return value;
698 }
699
700 /*----------------------------------------------------------------------*/
701
702 /* used after endpoint configuration */
703 static const struct file_operations ep_io_operations = {
704 .owner = THIS_MODULE,
705
706 .open = ep_open,
707 .release = ep_release,
708 .unlocked_ioctl = ep_ioctl,
709 .read_iter = ep_read_iter,
710 .write_iter = ep_write_iter,
711 };
712
713 /* ENDPOINT INITIALIZATION
714 *
715 * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
716 * status = write (fd, descriptors, sizeof descriptors)
717 *
718 * That write establishes the endpoint configuration, configuring
719 * the controller to process bulk, interrupt, or isochronous transfers
720 * at the right maxpacket size, and so on.
721 *
722 * The descriptors are message type 1, identified by a host order u32
723 * at the beginning of what's written. Descriptor order is: full/low
724 * speed descriptor, then optional high speed descriptor.
725 */
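/* Put differently, user mode code might configure a full speed bulk IN
 * endpoint with something like the sketch below. The descriptor values
 * and the htole16() helper from <endian.h> are illustrative assumptions;
 * append a second descriptor after the first for high speed support.
 *
 *	struct usb_endpoint_descriptor fs = {
 *		.bLength	  = USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType  = USB_DT_ENDPOINT,
 *		.bEndpointAddress = USB_DIR_IN | 1,
 *		.bmAttributes	  = USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize	  = htole16(64),
 *	};
 *	__u32 tag = 1;
 *	char buf[4 + USB_DT_ENDPOINT_SIZE];
 *
 *	memcpy(buf, &tag, 4);
 *	memcpy(buf + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *	write(ep_fd, buf, sizeof buf);
 */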
726 static ssize_t
727 ep_config (struct ep_data *data, const char *buf, size_t len)
728 {
729 struct usb_ep *ep;
730 u32 tag;
731 int value, length = len;
732
733 if (data->state != STATE_EP_READY) {
734 value = -EL2HLT;
735 goto fail;
736 }
737
738 value = len;
739 if (len < USB_DT_ENDPOINT_SIZE + 4)
740 goto fail0;
741
742 /* we might need to change message format someday */
743 memcpy(&tag, buf, 4);
744 if (tag != 1) {
745 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
746 goto fail0;
747 }
748 buf += 4;
749 len -= 4;
750
751 /* NOTE: audio endpoint extensions not accepted here;
752 * just don't include the extra bytes.
753 */
754
755 /* full/low speed descriptor, then high speed */
756 memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
757 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
758 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
759 goto fail0;
760 if (len != USB_DT_ENDPOINT_SIZE) {
761 if (len != 2 * USB_DT_ENDPOINT_SIZE)
762 goto fail0;
763 memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
764 USB_DT_ENDPOINT_SIZE);
765 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
766 || data->hs_desc.bDescriptorType
767 != USB_DT_ENDPOINT) {
768 DBG(data->dev, "config %s, bad hs length or type\n",
769 data->name);
770 goto fail0;
771 }
772 }
773
774 spin_lock_irq (&data->dev->lock);
775 if (data->dev->state == STATE_DEV_UNBOUND) {
776 value = -ENOENT;
777 goto gone;
778 } else {
779 ep = data->ep;
780 if (ep == NULL) {
781 value = -ENODEV;
782 goto gone;
783 }
784 }
785 switch (data->dev->gadget->speed) {
786 case USB_SPEED_LOW:
787 case USB_SPEED_FULL:
788 ep->desc = &data->desc;
789 break;
790 case USB_SPEED_HIGH:
791 /* fails if caller didn't provide that descriptor... */
792 ep->desc = &data->hs_desc;
793 break;
794 default:
795 DBG(data->dev, "unconnected, %s init abandoned\n",
796 data->name);
797 value = -EINVAL;
798 goto gone;
799 }
800 value = usb_ep_enable(ep);
801 if (value == 0) {
802 data->state = STATE_EP_ENABLED;
803 value = length;
804 }
805 gone:
806 spin_unlock_irq (&data->dev->lock);
807 if (value < 0) {
808 fail:
809 data->desc.bDescriptorType = 0;
810 data->hs_desc.bDescriptorType = 0;
811 }
812 return value;
813 fail0:
814 value = -EINVAL;
815 goto fail;
816 }
817
818 static int
819 ep_open (struct inode *inode, struct file *fd)
820 {
821 struct ep_data *data = inode->i_private;
822 int value = -EBUSY;
823
824 if (mutex_lock_interruptible(&data->lock) != 0)
825 return -EINTR;
826 spin_lock_irq (&data->dev->lock);
827 if (data->dev->state == STATE_DEV_UNBOUND)
828 value = -ENOENT;
829 else if (data->state == STATE_EP_DISABLED) {
830 value = 0;
831 data->state = STATE_EP_READY;
832 get_ep (data);
833 fd->private_data = data;
834 VDEBUG (data->dev, "%s ready\n", data->name);
835 } else
836 DBG (data->dev, "%s state %d\n",
837 data->name, data->state);
838 spin_unlock_irq (&data->dev->lock);
839 mutex_unlock(&data->lock);
840 return value;
841 }
842
843 /*----------------------------------------------------------------------*/
844
845 /* EP0 IMPLEMENTATION can be partly in userspace.
846 *
847 * Drivers that use this facility receive various events, including
848 * control requests the kernel doesn't handle. Drivers that don't
849 * use this facility may be too simple-minded for real applications.
850 */
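/* A hedged sketch of how user mode code might answer a SETUP event that
 * was delegated through the ep0 file. Names like "fd", "reply" and
 * "reply_len" are illustrative, wLength is little-endian on the wire, and
 * real drivers may instead request a stall with a "wrong direction" call.
 *
 *	struct usb_gadgetfs_event event;
 *
 *	if (read(fd, &event, sizeof event) == sizeof event
 *			&& event.type == GADGETFS_SETUP) {
 *		if (event.u.setup.bRequestType & USB_DIR_IN)
 *			write(fd, reply, reply_len);	// IN data stage
 *		else if (event.u.setup.wLength)
 *			read(fd, reply, event.u.setup.wLength);	// OUT data stage
 *		else
 *			read(fd, NULL, 0);	// zero length ack (status stage)
 *	}
 */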
851
852 static inline void ep0_readable (struct dev_data *dev)
853 {
854 wake_up (&dev->wait);
855 kill_fasync (&dev->fasync, SIGIO, POLL_IN);
856 }
857
858 static void clean_req (struct usb_ep *ep, struct usb_request *req)
859 {
860 struct dev_data *dev = ep->driver_data;
861
862 if (req->buf != dev->rbuf) {
863 kfree(req->buf);
864 req->buf = dev->rbuf;
865 }
866 req->complete = epio_complete;
867 dev->setup_out_ready = 0;
868 }
869
870 static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
871 {
872 struct dev_data *dev = ep->driver_data;
873 unsigned long flags;
874 int free = 1;
875
876 /* for control OUT, data must still get to userspace */
877 spin_lock_irqsave(&dev->lock, flags);
878 if (!dev->setup_in) {
879 dev->setup_out_error = (req->status != 0);
880 if (!dev->setup_out_error)
881 free = 0;
882 dev->setup_out_ready = 1;
883 ep0_readable (dev);
884 }
885
886 /* clean up as appropriate */
887 if (free && req->buf != &dev->rbuf)
888 clean_req (ep, req);
889 req->complete = epio_complete;
890 spin_unlock_irqrestore(&dev->lock, flags);
891 }
892
893 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
894 {
895 struct dev_data *dev = ep->driver_data;
896
897 if (dev->setup_out_ready) {
898 DBG (dev, "ep0 request busy!\n");
899 return -EBUSY;
900 }
901 if (len > sizeof (dev->rbuf))
902 req->buf = kmalloc(len, GFP_ATOMIC);
903 if (req->buf == NULL) {
904 req->buf = dev->rbuf;
905 return -ENOMEM;
906 }
907 req->complete = ep0_complete;
908 req->length = len;
909 req->zero = 0;
910 return 0;
911 }
912
913 static ssize_t
914 ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
915 {
916 struct dev_data *dev = fd->private_data;
917 ssize_t retval;
918 enum ep0_state state;
919
920 spin_lock_irq (&dev->lock);
921 if (dev->state <= STATE_DEV_OPENED) {
922 retval = -EINVAL;
923 goto done;
924 }
925
926 /* report fd mode change before acting on it */
927 if (dev->setup_abort) {
928 dev->setup_abort = 0;
929 retval = -EIDRM;
930 goto done;
931 }
932
933 /* control DATA stage */
934 if ((state = dev->state) == STATE_DEV_SETUP) {
935
936 if (dev->setup_in) { /* stall IN */
937 VDEBUG(dev, "ep0in stall\n");
938 (void) usb_ep_set_halt (dev->gadget->ep0);
939 retval = -EL2HLT;
940 dev->state = STATE_DEV_CONNECTED;
941
942 } else if (len == 0) { /* ack SET_CONFIGURATION etc */
943 struct usb_ep *ep = dev->gadget->ep0;
944 struct usb_request *req = dev->req;
945
946 if ((retval = setup_req (ep, req, 0)) == 0) {
947 ++dev->udc_usage;
948 spin_unlock_irq (&dev->lock);
949 retval = usb_ep_queue (ep, req, GFP_KERNEL);
950 spin_lock_irq (&dev->lock);
951 --dev->udc_usage;
952 }
953 dev->state = STATE_DEV_CONNECTED;
954
955 /* assume that was SET_CONFIGURATION */
956 if (dev->current_config) {
957 unsigned power;
958
959 if (gadget_is_dualspeed(dev->gadget)
960 && (dev->gadget->speed
961 == USB_SPEED_HIGH))
962 power = dev->hs_config->bMaxPower;
963 else
964 power = dev->config->bMaxPower;
965 usb_gadget_vbus_draw(dev->gadget, 2 * power);
966 }
967
968 } else { /* collect OUT data */
969 if ((fd->f_flags & O_NONBLOCK) != 0
970 && !dev->setup_out_ready) {
971 retval = -EAGAIN;
972 goto done;
973 }
974 spin_unlock_irq (&dev->lock);
975 retval = wait_event_interruptible (dev->wait,
976 dev->setup_out_ready != 0);
977
978 /* FIXME state could change from under us */
979 spin_lock_irq (&dev->lock);
980 if (retval)
981 goto done;
982
983 if (dev->state != STATE_DEV_SETUP) {
984 retval = -ECANCELED;
985 goto done;
986 }
987 dev->state = STATE_DEV_CONNECTED;
988
989 if (dev->setup_out_error)
990 retval = -EIO;
991 else {
992 len = min (len, (size_t)dev->req->actual);
993 ++dev->udc_usage;
994 spin_unlock_irq(&dev->lock);
995 if (copy_to_user (buf, dev->req->buf, len))
996 retval = -EFAULT;
997 else
998 retval = len;
999 spin_lock_irq(&dev->lock);
1000 --dev->udc_usage;
1001 clean_req (dev->gadget->ep0, dev->req);
1002 /* NOTE userspace can't yet choose to stall */
1003 }
1004 }
1005 goto done;
1006 }
1007
1008 /* else normal: return event data */
1009 if (len < sizeof dev->event [0]) {
1010 retval = -EINVAL;
1011 goto done;
1012 }
1013 len -= len % sizeof (struct usb_gadgetfs_event);
1014 dev->usermode_setup = 1;
1015
1016 scan:
1017 /* return queued events right away */
1018 if (dev->ev_next != 0) {
1019 unsigned i, n;
1020
1021 n = len / sizeof (struct usb_gadgetfs_event);
1022 if (dev->ev_next < n)
1023 n = dev->ev_next;
1024
1025 /* ep0 i/o has special semantics during STATE_DEV_SETUP */
1026 for (i = 0; i < n; i++) {
1027 if (dev->event [i].type == GADGETFS_SETUP) {
1028 dev->state = STATE_DEV_SETUP;
1029 n = i + 1;
1030 break;
1031 }
1032 }
1033 spin_unlock_irq (&dev->lock);
1034 len = n * sizeof (struct usb_gadgetfs_event);
1035 if (copy_to_user (buf, &dev->event, len))
1036 retval = -EFAULT;
1037 else
1038 retval = len;
1039 if (len > 0) {
1040 /* NOTE this doesn't guard against broken drivers;
1041 * concurrent ep0 readers may lose events.
1042 */
1043 spin_lock_irq (&dev->lock);
1044 if (dev->ev_next > n) {
1045 memmove(&dev->event[0], &dev->event[n],
1046 sizeof (struct usb_gadgetfs_event)
1047 * (dev->ev_next - n));
1048 }
1049 dev->ev_next -= n;
1050 spin_unlock_irq (&dev->lock);
1051 }
1052 return retval;
1053 }
1054 if (fd->f_flags & O_NONBLOCK) {
1055 retval = -EAGAIN;
1056 goto done;
1057 }
1058
1059 switch (state) {
1060 default:
1061 DBG (dev, "fail %s, state %d\n", __func__, state);
1062 retval = -ESRCH;
1063 break;
1064 case STATE_DEV_UNCONNECTED:
1065 case STATE_DEV_CONNECTED:
1066 spin_unlock_irq (&dev->lock);
1067 DBG (dev, "%s wait\n", __func__);
1068
1069 /* wait for events */
1070 retval = wait_event_interruptible (dev->wait,
1071 dev->ev_next != 0);
1072 if (retval < 0)
1073 return retval;
1074 spin_lock_irq (&dev->lock);
1075 goto scan;
1076 }
1077
1078 done:
1079 spin_unlock_irq (&dev->lock);
1080 return retval;
1081 }
1082
1083 static struct usb_gadgetfs_event *
1084 next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1085 {
1086 struct usb_gadgetfs_event *event;
1087 unsigned i;
1088
1089 switch (type) {
1090 /* these events purge the queue */
1091 case GADGETFS_DISCONNECT:
1092 if (dev->state == STATE_DEV_SETUP)
1093 dev->setup_abort = 1;
1094 fallthrough;
1095 case GADGETFS_CONNECT:
1096 dev->ev_next = 0;
1097 break;
1098 case GADGETFS_SETUP: /* previous request timed out */
1099 case GADGETFS_SUSPEND: /* same effect */
1100 /* these events can't be repeated */
1101 for (i = 0; i != dev->ev_next; i++) {
1102 if (dev->event [i].type != type)
1103 continue;
1104 DBG(dev, "discard old event[%d] %d\n", i, type);
1105 dev->ev_next--;
1106 if (i == dev->ev_next)
1107 break;
1108 /* indices start at zero, for simplicity */
1109 memmove (&dev->event [i], &dev->event [i + 1],
1110 sizeof (struct usb_gadgetfs_event)
1111 * (dev->ev_next - i));
1112 }
1113 break;
1114 default:
1115 BUG ();
1116 }
1117 VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1118 event = &dev->event [dev->ev_next++];
1119 BUG_ON (dev->ev_next > N_EVENT);
1120 memset (event, 0, sizeof *event);
1121 event->type = type;
1122 return event;
1123 }
1124
1125 static ssize_t
1126 ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1127 {
1128 struct dev_data *dev = fd->private_data;
1129 ssize_t retval = -ESRCH;
1130
1131 /* report fd mode change before acting on it */
1132 if (dev->setup_abort) {
1133 dev->setup_abort = 0;
1134 retval = -EIDRM;
1135
1136 /* data and/or status stage for control request */
1137 } else if (dev->state == STATE_DEV_SETUP) {
1138
1139 len = min_t(size_t, len, dev->setup_wLength);
1140 if (dev->setup_in) {
1141 retval = setup_req (dev->gadget->ep0, dev->req, len);
1142 if (retval == 0) {
1143 dev->state = STATE_DEV_CONNECTED;
1144 ++dev->udc_usage;
1145 spin_unlock_irq (&dev->lock);
1146 if (copy_from_user (dev->req->buf, buf, len))
1147 retval = -EFAULT;
1148 else {
1149 if (len < dev->setup_wLength)
1150 dev->req->zero = 1;
1151 retval = usb_ep_queue (
1152 dev->gadget->ep0, dev->req,
1153 GFP_KERNEL);
1154 }
1155 spin_lock_irq(&dev->lock);
1156 --dev->udc_usage;
1157 if (retval < 0) {
1158 clean_req (dev->gadget->ep0, dev->req);
1159 } else
1160 retval = len;
1161
1162 return retval;
1163 }
1164
1165 /* can stall some OUT transfers */
1166 } else if (dev->setup_can_stall) {
1167 VDEBUG(dev, "ep0out stall\n");
1168 (void) usb_ep_set_halt (dev->gadget->ep0);
1169 retval = -EL2HLT;
1170 dev->state = STATE_DEV_CONNECTED;
1171 } else {
1172 DBG(dev, "bogus ep0out stall!\n");
1173 }
1174 } else
1175 DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1176
1177 return retval;
1178 }
1179
1180 static int
1181 ep0_fasync (int f, struct file *fd, int on)
1182 {
1183 struct dev_data *dev = fd->private_data;
1184 // caller must F_SETOWN before signal delivery happens
1185 VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1186 return fasync_helper (f, fd, on, &dev->fasync);
1187 }
1188
1189 static struct usb_gadget_driver gadgetfs_driver;
1190
1191 static int
1192 dev_release (struct inode *inode, struct file *fd)
1193 {
1194 struct dev_data *dev = fd->private_data;
1195
1196 /* closing ep0 === shutdown all */
1197
1198 if (dev->gadget_registered) {
1199 usb_gadget_unregister_driver (&gadgetfs_driver);
1200 dev->gadget_registered = false;
1201 }
1202
1203 /* at this point "good" hardware has disconnected the
1204 * device from USB; the host won't see it any more.
1205 * alternatively, all host requests will time out.
1206 */
1207
1208 kfree (dev->buf);
1209 dev->buf = NULL;
1210
1211 /* other endpoints were all decoupled from this device */
1212 spin_lock_irq(&dev->lock);
1213 dev->state = STATE_DEV_DISABLED;
1214 spin_unlock_irq(&dev->lock);
1215
1216 put_dev (dev);
1217 return 0;
1218 }
1219
1220 static __poll_t
1221 ep0_poll (struct file *fd, poll_table *wait)
1222 {
1223 struct dev_data *dev = fd->private_data;
1224 __poll_t mask = 0;
1225
1226 if (dev->state <= STATE_DEV_OPENED)
1227 return DEFAULT_POLLMASK;
1228
1229 poll_wait(fd, &dev->wait, wait);
1230
1231 spin_lock_irq(&dev->lock);
1232
1233 /* report fd mode change before acting on it */
1234 if (dev->setup_abort) {
1235 dev->setup_abort = 0;
1236 mask = EPOLLHUP;
1237 goto out;
1238 }
1239
1240 if (dev->state == STATE_DEV_SETUP) {
1241 if (dev->setup_in || dev->setup_can_stall)
1242 mask = EPOLLOUT;
1243 } else {
1244 if (dev->ev_next != 0)
1245 mask = EPOLLIN;
1246 }
1247 out:
1248 spin_unlock_irq(&dev->lock);
1249 return mask;
1250 }
1251
1252 static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1253 {
1254 struct dev_data *dev = fd->private_data;
1255 struct usb_gadget *gadget = dev->gadget;
1256 long ret = -ENOTTY;
1257
1258 spin_lock_irq(&dev->lock);
1259 if (dev->state == STATE_DEV_OPENED ||
1260 dev->state == STATE_DEV_UNBOUND) {
1261 /* Not bound to a UDC */
1262 } else if (gadget->ops->ioctl) {
1263 ++dev->udc_usage;
1264 spin_unlock_irq(&dev->lock);
1265
1266 ret = gadget->ops->ioctl (gadget, code, value);
1267
1268 spin_lock_irq(&dev->lock);
1269 --dev->udc_usage;
1270 }
1271 spin_unlock_irq(&dev->lock);
1272
1273 return ret;
1274 }
1275
1276 /*----------------------------------------------------------------------*/
1277
1278 /* The in-kernel gadget driver handles most ep0 issues, in particular
1279 * enumerating the single configuration (as provided from user space).
1280 *
1281 * Unrecognized ep0 requests may be handled in user space.
1282 */
1283
1284 static void make_qualifier (struct dev_data *dev)
1285 {
1286 struct usb_qualifier_descriptor qual;
1287 struct usb_device_descriptor *desc;
1288
1289 qual.bLength = sizeof qual;
1290 qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1291 qual.bcdUSB = cpu_to_le16 (0x0200);
1292
1293 desc = dev->dev;
1294 qual.bDeviceClass = desc->bDeviceClass;
1295 qual.bDeviceSubClass = desc->bDeviceSubClass;
1296 qual.bDeviceProtocol = desc->bDeviceProtocol;
1297
1298 /* assumes ep0 uses the same value for both speeds ... */
1299 qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1300
1301 qual.bNumConfigurations = 1;
1302 qual.bRESERVED = 0;
1303
1304 memcpy (dev->rbuf, &qual, sizeof qual);
1305 }
1306
1307 static int
1308 config_buf (struct dev_data *dev, u8 type, unsigned index)
1309 {
1310 int len;
1311 int hs = 0;
1312
1313 /* only one configuration */
1314 if (index > 0)
1315 return -EINVAL;
1316
1317 if (gadget_is_dualspeed(dev->gadget)) {
1318 hs = (dev->gadget->speed == USB_SPEED_HIGH);
1319 if (type == USB_DT_OTHER_SPEED_CONFIG)
1320 hs = !hs;
1321 }
1322 if (hs) {
1323 dev->req->buf = dev->hs_config;
1324 len = le16_to_cpu(dev->hs_config->wTotalLength);
1325 } else {
1326 dev->req->buf = dev->config;
1327 len = le16_to_cpu(dev->config->wTotalLength);
1328 }
1329 ((u8 *)dev->req->buf) [1] = type;
1330 return len;
1331 }
1332
1333 static int
1334 gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1335 {
1336 struct dev_data *dev = get_gadget_data (gadget);
1337 struct usb_request *req = dev->req;
1338 int value = -EOPNOTSUPP;
1339 struct usb_gadgetfs_event *event;
1340 u16 w_value = le16_to_cpu(ctrl->wValue);
1341 u16 w_length = le16_to_cpu(ctrl->wLength);
1342
1343 if (w_length > RBUF_SIZE) {
1344 if (ctrl->bRequestType & USB_DIR_IN) {
1345 /* Cast away the const, we are going to overwrite on purpose. */
1346 __le16 *temp = (__le16 *)&ctrl->wLength;
1347
1348 *temp = cpu_to_le16(RBUF_SIZE);
1349 w_length = RBUF_SIZE;
1350 } else {
1351 return value;
1352 }
1353 }
1354
1355 spin_lock (&dev->lock);
1356 dev->setup_abort = 0;
1357 if (dev->state == STATE_DEV_UNCONNECTED) {
1358 if (gadget_is_dualspeed(gadget)
1359 && gadget->speed == USB_SPEED_HIGH
1360 && dev->hs_config == NULL) {
1361 spin_unlock(&dev->lock);
1362 ERROR (dev, "no high speed config??\n");
1363 return -EINVAL;
1364 }
1365
1366 dev->state = STATE_DEV_CONNECTED;
1367
1368 INFO (dev, "connected\n");
1369 event = next_event (dev, GADGETFS_CONNECT);
1370 event->u.speed = gadget->speed;
1371 ep0_readable (dev);
1372
1373 /* host may have given up waiting for response. we can miss control
1374 * requests handled lower down (device/endpoint status and features);
1375 * then ep0_{read,write} will report the wrong status. controller
1376 * driver will have aborted pending i/o.
1377 */
1378 } else if (dev->state == STATE_DEV_SETUP)
1379 dev->setup_abort = 1;
1380
1381 req->buf = dev->rbuf;
1382 req->context = NULL;
1383 switch (ctrl->bRequest) {
1384
1385 case USB_REQ_GET_DESCRIPTOR:
1386 if (ctrl->bRequestType != USB_DIR_IN)
1387 goto unrecognized;
1388 switch (w_value >> 8) {
1389
1390 case USB_DT_DEVICE:
1391 value = min (w_length, (u16) sizeof *dev->dev);
1392 dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1393 req->buf = dev->dev;
1394 break;
1395 case USB_DT_DEVICE_QUALIFIER:
1396 if (!dev->hs_config)
1397 break;
1398 value = min (w_length, (u16)
1399 sizeof (struct usb_qualifier_descriptor));
1400 make_qualifier (dev);
1401 break;
1402 case USB_DT_OTHER_SPEED_CONFIG:
1403 case USB_DT_CONFIG:
1404 value = config_buf (dev,
1405 w_value >> 8,
1406 w_value & 0xff);
1407 if (value >= 0)
1408 value = min (w_length, (u16) value);
1409 break;
1410 case USB_DT_STRING:
1411 goto unrecognized;
1412
1413 default: // all others are errors
1414 break;
1415 }
1416 break;
1417
1418 /* currently one config, two speeds */
1419 case USB_REQ_SET_CONFIGURATION:
1420 if (ctrl->bRequestType != 0)
1421 goto unrecognized;
1422 if (0 == (u8) w_value) {
1423 value = 0;
1424 dev->current_config = 0;
1425 usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1426 // user mode expected to disable endpoints
1427 } else {
1428 u8 config, power;
1429
1430 if (gadget_is_dualspeed(gadget)
1431 && gadget->speed == USB_SPEED_HIGH) {
1432 config = dev->hs_config->bConfigurationValue;
1433 power = dev->hs_config->bMaxPower;
1434 } else {
1435 config = dev->config->bConfigurationValue;
1436 power = dev->config->bMaxPower;
1437 }
1438
1439 if (config == (u8) w_value) {
1440 value = 0;
1441 dev->current_config = config;
1442 usb_gadget_vbus_draw(gadget, 2 * power);
1443 }
1444 }
1445
1446 /* report SET_CONFIGURATION like any other control request,
1447 * except that usermode may not stall this. the next
1448 * request mustn't be allowed to start until this finishes:
1449 * endpoints and threads set up, etc.
1450 *
1451 * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
1452 * has bad/racy automagic that prevents synchronizing here.
1453 * even kernel mode drivers often miss them.
1454 */
1455 if (value == 0) {
1456 INFO (dev, "configuration #%d\n", dev->current_config);
1457 usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1458 if (dev->usermode_setup) {
1459 dev->setup_can_stall = 0;
1460 goto delegate;
1461 }
1462 }
1463 break;
1464
1465 #ifndef CONFIG_USB_PXA25X
1466 /* PXA automagically handles this request too */
1467 case USB_REQ_GET_CONFIGURATION:
1468 if (ctrl->bRequestType != 0x80)
1469 goto unrecognized;
1470 *(u8 *)req->buf = dev->current_config;
1471 value = min (w_length, (u16) 1);
1472 break;
1473 #endif
1474
1475 default:
1476 unrecognized:
1477 VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1478 dev->usermode_setup ? "delegate" : "fail",
1479 ctrl->bRequestType, ctrl->bRequest,
1480 w_value, le16_to_cpu(ctrl->wIndex), w_length);
1481
1482 /* if there's an ep0 reader, don't stall */
1483 if (dev->usermode_setup) {
1484 dev->setup_can_stall = 1;
1485 delegate:
1486 dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1487 ? 1 : 0;
1488 dev->setup_wLength = w_length;
1489 dev->setup_out_ready = 0;
1490 dev->setup_out_error = 0;
1491
1492 /* read DATA stage for OUT right away */
1493 if (unlikely (!dev->setup_in && w_length)) {
1494 value = setup_req (gadget->ep0, dev->req,
1495 w_length);
1496 if (value < 0)
1497 break;
1498
1499 ++dev->udc_usage;
1500 spin_unlock (&dev->lock);
1501 value = usb_ep_queue (gadget->ep0, dev->req,
1502 GFP_KERNEL);
1503 spin_lock (&dev->lock);
1504 --dev->udc_usage;
1505 if (value < 0) {
1506 clean_req (gadget->ep0, dev->req);
1507 break;
1508 }
1509
1510 /* we can't currently stall these */
1511 dev->setup_can_stall = 0;
1512 }
1513
1514 /* state changes when reader collects event */
1515 event = next_event (dev, GADGETFS_SETUP);
1516 event->u.setup = *ctrl;
1517 ep0_readable (dev);
1518 spin_unlock (&dev->lock);
1519 /*
1520 * Return USB_GADGET_DELAYED_STATUS as a workaround to
1521 * stop some UDC drivers (e.g. dwc3) from automatically
1522 * proceeding with the status stage for 0-length
1523 * transfers.
1524 * Should be removed once all UDC drivers are fixed to
1525 * always delay the status stage until a response is
1526 * queued to EP0.
1527 */
1528 return w_length == 0 ? USB_GADGET_DELAYED_STATUS : 0;
1529 }
1530 }
1531
1532 /* proceed with data transfer and status phases? */
1533 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1534 req->length = value;
1535 req->zero = value < w_length;
1536
1537 ++dev->udc_usage;
1538 spin_unlock (&dev->lock);
1539 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1540 spin_lock(&dev->lock);
1541 --dev->udc_usage;
1542 spin_unlock(&dev->lock);
1543 if (value < 0) {
1544 DBG (dev, "ep_queue --> %d\n", value);
1545 req->status = 0;
1546 }
1547 return value;
1548 }
1549
1550 /* device stalls when value < 0 */
1551 spin_unlock (&dev->lock);
1552 return value;
1553 }
1554
1555 static void destroy_ep_files (struct dev_data *dev)
1556 {
1557 DBG (dev, "%s %d\n", __func__, dev->state);
1558
1559 /* dev->state must prevent interference */
1560 spin_lock_irq (&dev->lock);
1561 while (!list_empty(&dev->epfiles)) {
1562 struct ep_data *ep;
1563 struct inode *parent;
1564 struct dentry *dentry;
1565
1566 /* break link to FS */
1567 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1568 list_del_init (&ep->epfiles);
1569 spin_unlock_irq (&dev->lock);
1570
1571 dentry = ep->dentry;
1572 ep->dentry = NULL;
1573 parent = d_inode(dentry->d_parent);
1574
1575 /* break link to controller */
1576 mutex_lock(&ep->lock);
1577 if (ep->state == STATE_EP_ENABLED)
1578 (void) usb_ep_disable (ep->ep);
1579 ep->state = STATE_EP_UNBOUND;
1580 usb_ep_free_request (ep->ep, ep->req);
1581 ep->ep = NULL;
1582 mutex_unlock(&ep->lock);
1583
1584 wake_up (&ep->wait);
1585 put_ep (ep);
1586
1587 /* break link to dcache */
1588 inode_lock(parent);
1589 d_delete (dentry);
1590 dput (dentry);
1591 inode_unlock(parent);
1592
1593 spin_lock_irq (&dev->lock);
1594 }
1595 spin_unlock_irq (&dev->lock);
1596 }
1597
1598
1599 static struct dentry *
1600 gadgetfs_create_file (struct super_block *sb, char const *name,
1601 void *data, const struct file_operations *fops);
1602
1603 static int activate_ep_files (struct dev_data *dev)
1604 {
1605 struct usb_ep *ep;
1606 struct ep_data *data;
1607
1608 gadget_for_each_ep (ep, dev->gadget) {
1609
1610 data = kzalloc(sizeof(*data), GFP_KERNEL);
1611 if (!data)
1612 goto enomem0;
1613 data->state = STATE_EP_DISABLED;
1614 mutex_init(&data->lock);
1615 init_waitqueue_head (&data->wait);
1616
1617 strncpy (data->name, ep->name, sizeof (data->name) - 1);
1618 refcount_set (&data->count, 1);
1619 data->dev = dev;
1620 get_dev (dev);
1621
1622 data->ep = ep;
1623 ep->driver_data = data;
1624
1625 data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1626 if (!data->req)
1627 goto enomem1;
1628
1629 data->dentry = gadgetfs_create_file (dev->sb, data->name,
1630 data, &ep_io_operations);
1631 if (!data->dentry)
1632 goto enomem2;
1633 list_add_tail (&data->epfiles, &dev->epfiles);
1634 }
1635 return 0;
1636
1637 enomem2:
1638 usb_ep_free_request (ep, data->req);
1639 enomem1:
1640 put_dev (dev);
1641 kfree (data);
1642 enomem0:
1643 DBG (dev, "%s enomem\n", __func__);
1644 destroy_ep_files (dev);
1645 return -ENOMEM;
1646 }
1647
1648 static void
1649 gadgetfs_unbind (struct usb_gadget *gadget)
1650 {
1651 struct dev_data *dev = get_gadget_data (gadget);
1652
1653 DBG (dev, "%s\n", __func__);
1654
1655 spin_lock_irq (&dev->lock);
1656 dev->state = STATE_DEV_UNBOUND;
1657 while (dev->udc_usage > 0) {
1658 spin_unlock_irq(&dev->lock);
1659 usleep_range(1000, 2000);
1660 spin_lock_irq(&dev->lock);
1661 }
1662 spin_unlock_irq (&dev->lock);
1663
1664 destroy_ep_files (dev);
1665 gadget->ep0->driver_data = NULL;
1666 set_gadget_data (gadget, NULL);
1667
1668 /* we've already been disconnected ... no i/o is active */
1669 if (dev->req)
1670 usb_ep_free_request (gadget->ep0, dev->req);
1671 DBG (dev, "%s done\n", __func__);
1672 put_dev (dev);
1673 }
1674
1675 static struct dev_data *the_device;
1676
1677 static int gadgetfs_bind(struct usb_gadget *gadget,
1678 struct usb_gadget_driver *driver)
1679 {
1680 struct dev_data *dev = the_device;
1681
1682 if (!dev)
1683 return -ESRCH;
1684 if (0 != strcmp (CHIP, gadget->name)) {
1685 pr_err("%s expected %s controller not %s\n",
1686 shortname, CHIP, gadget->name);
1687 return -ENODEV;
1688 }
1689
1690 set_gadget_data (gadget, dev);
1691 dev->gadget = gadget;
1692 gadget->ep0->driver_data = dev;
1693
1694 /* preallocate control response and buffer */
1695 dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1696 if (!dev->req)
1697 goto enomem;
1698 dev->req->context = NULL;
1699 dev->req->complete = epio_complete;
1700
1701 if (activate_ep_files (dev) < 0)
1702 goto enomem;
1703
1704 INFO (dev, "bound to %s driver\n", gadget->name);
1705 spin_lock_irq(&dev->lock);
1706 dev->state = STATE_DEV_UNCONNECTED;
1707 spin_unlock_irq(&dev->lock);
1708 get_dev (dev);
1709 return 0;
1710
1711 enomem:
1712 gadgetfs_unbind (gadget);
1713 return -ENOMEM;
1714 }
1715
1716 static void
1717 gadgetfs_disconnect (struct usb_gadget *gadget)
1718 {
1719 struct dev_data *dev = get_gadget_data (gadget);
1720 unsigned long flags;
1721
1722 spin_lock_irqsave (&dev->lock, flags);
1723 if (dev->state == STATE_DEV_UNCONNECTED)
1724 goto exit;
1725 dev->state = STATE_DEV_UNCONNECTED;
1726
1727 INFO (dev, "disconnected\n");
1728 next_event (dev, GADGETFS_DISCONNECT);
1729 ep0_readable (dev);
1730 exit:
1731 spin_unlock_irqrestore (&dev->lock, flags);
1732 }
1733
1734 static void
1735 gadgetfs_suspend (struct usb_gadget *gadget)
1736 {
1737 struct dev_data *dev = get_gadget_data (gadget);
1738 unsigned long flags;
1739
1740 INFO (dev, "suspended from state %d\n", dev->state);
1741 spin_lock_irqsave(&dev->lock, flags);
1742 switch (dev->state) {
1743 case STATE_DEV_SETUP: // VERY odd... host died??
1744 case STATE_DEV_CONNECTED:
1745 case STATE_DEV_UNCONNECTED:
1746 next_event (dev, GADGETFS_SUSPEND);
1747 ep0_readable (dev);
1748 fallthrough;
1749 default:
1750 break;
1751 }
1752 spin_unlock_irqrestore(&dev->lock, flags);
1753 }
1754
1755 static struct usb_gadget_driver gadgetfs_driver = {
1756 .function = (char *) driver_desc,
1757 .bind = gadgetfs_bind,
1758 .unbind = gadgetfs_unbind,
1759 .setup = gadgetfs_setup,
1760 .reset = gadgetfs_disconnect,
1761 .disconnect = gadgetfs_disconnect,
1762 .suspend = gadgetfs_suspend,
1763
1764 .driver = {
1765 .name = shortname,
1766 },
1767 };
1768
1769 /*----------------------------------------------------------------------*/
1770 /* DEVICE INITIALIZATION
1771 *
1772 * fd = open ("/dev/gadget/$CHIP", O_RDWR)
1773 * status = write (fd, descriptors, sizeof descriptors)
1774 *
1775 * That write establishes the device configuration, so the kernel can
1776 * bind to the controller ... guaranteeing it can handle enumeration
1777 * at all necessary speeds. Descriptor order is:
1778 *
1779 * . message tag (u32, host order) ... for now, must be zero; it
1780 * would change to support features like multi-config devices
1781 * . full/low speed config ... all wTotalLength bytes (with interface,
1782 * class, altsetting, endpoint, and other descriptors)
1783 * . high speed config ... all descriptors, for high speed operation;
1784 * this one's optional except for high-speed hardware
1785 * . device descriptor
1786 *
1787 * Endpoints are not yet enabled. Drivers must wait until device
1788 * configuration and interface altsetting changes create
1789 * the need to configure (or unconfigure) them.
1790 *
1791 * After initialization, the device stays active for as long as that
1792 * $CHIP file is open. Events must then be read from that descriptor,
1793 * such as configuration notifications.
1794 */
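/* So the first write to the $CHIP file might be assembled as below, a
 * hedged sketch in which "config", "hs_config" and "device" point to
 * already populated descriptors (their contents aren't shown) and the
 * 4 KB buffer is just an arbitrary upper bound.
 *
 *	__u32 tag = 0;
 *	char buf[4096], *cp = buf;
 *
 *	memcpy(cp, &tag, 4);
 *	cp += 4;
 *	memcpy(cp, config, config_total);	// all wTotalLength bytes
 *	cp += config_total;
 *	if (hs_config) {			// optional except for HS hardware
 *		memcpy(cp, hs_config, hs_total);
 *		cp += hs_total;
 *	}
 *	memcpy(cp, device, USB_DT_DEVICE_SIZE);
 *	cp += USB_DT_DEVICE_SIZE;
 *	write(ep0_fd, buf, cp - buf);
 */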
1795
1796 static int is_valid_config(struct usb_config_descriptor *config,
1797 unsigned int total)
1798 {
1799 return config->bDescriptorType == USB_DT_CONFIG
1800 && config->bLength == USB_DT_CONFIG_SIZE
1801 && total >= USB_DT_CONFIG_SIZE
1802 && config->bConfigurationValue != 0
1803 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1804 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1805 /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1806 /* FIXME check lengths: walk to end */
1807 }
1808
1809 static ssize_t
1810 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1811 {
1812 struct dev_data *dev = fd->private_data;
1813 ssize_t value, length = len;
1814 unsigned total;
1815 u32 tag;
1816 char *kbuf;
1817
1818 spin_lock_irq(&dev->lock);
1819 if (dev->state > STATE_DEV_OPENED) {
1820 value = ep0_write(fd, buf, len, ptr);
1821 spin_unlock_irq(&dev->lock);
1822 return value;
1823 }
1824 spin_unlock_irq(&dev->lock);
1825
1826 if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1827 (len > PAGE_SIZE * 4))
1828 return -EINVAL;
1829
1830 /* we might need to change message format someday */
1831 if (copy_from_user (&tag, buf, 4))
1832 return -EFAULT;
1833 if (tag != 0)
1834 return -EINVAL;
1835 buf += 4;
1836 length -= 4;
1837
1838 kbuf = memdup_user(buf, length);
1839 if (IS_ERR(kbuf))
1840 return PTR_ERR(kbuf);
1841
1842 spin_lock_irq (&dev->lock);
1843 value = -EINVAL;
1844 if (dev->buf) {
1845 spin_unlock_irq(&dev->lock);
1846 kfree(kbuf);
1847 return value;
1848 }
1849 dev->buf = kbuf;
1850
1851 /* full or low speed config */
1852 dev->config = (void *) kbuf;
1853 total = le16_to_cpu(dev->config->wTotalLength);
1854 if (!is_valid_config(dev->config, total) ||
1855 total > length - USB_DT_DEVICE_SIZE)
1856 goto fail;
1857 kbuf += total;
1858 length -= total;
1859
1860 /* optional high speed config */
1861 if (kbuf [1] == USB_DT_CONFIG) {
1862 dev->hs_config = (void *) kbuf;
1863 total = le16_to_cpu(dev->hs_config->wTotalLength);
1864 if (!is_valid_config(dev->hs_config, total) ||
1865 total > length - USB_DT_DEVICE_SIZE)
1866 goto fail;
1867 kbuf += total;
1868 length -= total;
1869 } else {
1870 dev->hs_config = NULL;
1871 }
1872
1873 /* could support multiple configs, using another encoding! */
1874
1875 /* device descriptor (tweaked for paranoia) */
1876 if (length != USB_DT_DEVICE_SIZE)
1877 goto fail;
1878 dev->dev = (void *)kbuf;
1879 if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1880 || dev->dev->bDescriptorType != USB_DT_DEVICE
1881 || dev->dev->bNumConfigurations != 1)
1882 goto fail;
1883 dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1884
1885 /* triggers gadgetfs_bind(); then we can enumerate. */
1886 spin_unlock_irq (&dev->lock);
1887 if (dev->hs_config)
1888 gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1889 else
1890 gadgetfs_driver.max_speed = USB_SPEED_FULL;
1891
1892 value = usb_gadget_register_driver(&gadgetfs_driver);
1893 if (value != 0) {
1894 spin_lock_irq(&dev->lock);
1895 goto fail;
1896 } else {
1897 /* at this point "good" hardware has for the first time
1898 * let the USB host see us. alternatively, if users
1899 * unplug/replug that will clear all the error state.
1900 *
1901 * note: everything running before here was guaranteed
1902 * to choke driver model style diagnostics. from here
1903 * on, they can work ... except in cleanup paths that
1904 * kick in after the ep0 descriptor is closed.
1905 */
1906 value = len;
1907 dev->gadget_registered = true;
1908 }
1909 return value;
1910
1911 fail:
1912 dev->config = NULL;
1913 dev->hs_config = NULL;
1914 dev->dev = NULL;
1915 spin_unlock_irq (&dev->lock);
1916 pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1917 kfree (dev->buf);
1918 dev->buf = NULL;
1919 return value;
1920 }
1921
1922 static int
1923 gadget_dev_open (struct inode *inode, struct file *fd)
1924 {
1925 struct dev_data *dev = inode->i_private;
1926 int value = -EBUSY;
1927
1928 spin_lock_irq(&dev->lock);
1929 if (dev->state == STATE_DEV_DISABLED) {
1930 dev->ev_next = 0;
1931 dev->state = STATE_DEV_OPENED;
1932 fd->private_data = dev;
1933 get_dev (dev);
1934 value = 0;
1935 }
1936 spin_unlock_irq(&dev->lock);
1937 return value;
1938 }
1939
1940 static const struct file_operations ep0_operations = {
1941
1942 .open = gadget_dev_open,
1943 .read = ep0_read,
1944 .write = dev_config,
1945 .fasync = ep0_fasync,
1946 .poll = ep0_poll,
1947 .unlocked_ioctl = gadget_dev_ioctl,
1948 .release = dev_release,
1949 };
1950
1951 /*----------------------------------------------------------------------*/
1952
1953 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1954 *
1955 * Mounting the filesystem creates a controller file, used first for
1956 * device configuration then later for event monitoring.
1957 */
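/* User mode setup can be as simple as this sketch; the /dev/gadget mount
 * point is conventional rather than enforced by the driver.
 *
 *	mkdir("/dev/gadget", 0700);
 *	mount("none", "/dev/gadget", "gadgetfs", 0, NULL);
 *	// then open /dev/gadget/$CHIP and write the descriptors
 */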
1958
1959
1960 /* FIXME PAM etc could set this security policy without mount options
1961 * if epfiles inherited ownership and permissions from ep0 ...
1962 */
1963
1964 static unsigned default_uid;
1965 static unsigned default_gid;
1966 static unsigned default_perm = S_IRUSR | S_IWUSR;
1967
1968 module_param (default_uid, uint, 0644);
1969 module_param (default_gid, uint, 0644);
1970 module_param (default_perm, uint, 0644);
1971
1972
1973 static struct inode *
1974 gadgetfs_make_inode (struct super_block *sb,
1975 void *data, const struct file_operations *fops,
1976 int mode)
1977 {
1978 struct inode *inode = new_inode (sb);
1979
1980 if (inode) {
1981 inode->i_ino = get_next_ino();
1982 inode->i_mode = mode;
1983 inode->i_uid = make_kuid(&init_user_ns, default_uid);
1984 inode->i_gid = make_kgid(&init_user_ns, default_gid);
1985 simple_inode_init_ts(inode);
1986 inode->i_private = data;
1987 inode->i_fop = fops;
1988 }
1989 return inode;
1990 }
1991
1992 /* creates in fs root directory, so non-renamable and non-linkable.
1993 * so inode and dentry are paired, until device reconfig.
1994 */
1995 static struct dentry *
1996 gadgetfs_create_file (struct super_block *sb, char const *name,
1997 void *data, const struct file_operations *fops)
1998 {
1999 struct dentry *dentry;
2000 struct inode *inode;
2001
2002 dentry = d_alloc_name(sb->s_root, name);
2003 if (!dentry)
2004 return NULL;
2005
2006 inode = gadgetfs_make_inode (sb, data, fops,
2007 S_IFREG | (default_perm & S_IRWXUGO));
2008 if (!inode) {
2009 dput(dentry);
2010 return NULL;
2011 }
2012 d_add (dentry, inode);
2013 return dentry;
2014 }
2015
2016 static const struct super_operations gadget_fs_operations = {
2017 .statfs = simple_statfs,
2018 .drop_inode = generic_delete_inode,
2019 };
2020
2021 static int
2022 gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
2023 {
2024 struct inode *inode;
2025 struct dev_data *dev;
2026 int rc;
2027
2028 mutex_lock(&sb_mutex);
2029
2030 if (the_device) {
2031 rc = -ESRCH;
2032 goto Done;
2033 }
2034
2035 CHIP = usb_get_gadget_udc_name();
2036 if (!CHIP) {
2037 rc = -ENODEV;
2038 goto Done;
2039 }
2040
2041 /* superblock */
2042 sb->s_blocksize = PAGE_SIZE;
2043 sb->s_blocksize_bits = PAGE_SHIFT;
2044 sb->s_magic = GADGETFS_MAGIC;
2045 sb->s_op = &gadget_fs_operations;
2046 sb->s_time_gran = 1;
2047
2048 /* root inode */
2049 inode = gadgetfs_make_inode (sb,
2050 NULL, &simple_dir_operations,
2051 S_IFDIR | S_IRUGO | S_IXUGO);
2052 if (!inode)
2053 goto Enomem;
2054 inode->i_op = &simple_dir_inode_operations;
2055 if (!(sb->s_root = d_make_root (inode)))
2056 goto Enomem;
2057
2058 /* the ep0 file is named after the controller we expect;
2059 * user mode code can use it for sanity checks, like we do.
2060 */
2061 dev = dev_new ();
2062 if (!dev)
2063 goto Enomem;
2064
2065 dev->sb = sb;
2066 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2067 if (!dev->dentry) {
2068 put_dev(dev);
2069 goto Enomem;
2070 }
2071
2072 /* other endpoint files are available after hardware setup,
2073 * from binding to a controller.
2074 */
2075 the_device = dev;
2076 rc = 0;
2077 goto Done;
2078
2079 Enomem:
2080 kfree(CHIP);
2081 CHIP = NULL;
2082 rc = -ENOMEM;
2083
2084 Done:
2085 mutex_unlock(&sb_mutex);
2086 return rc;
2087 }
2088
2089 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2090 static int gadgetfs_get_tree(struct fs_context *fc)
2091 {
2092 return get_tree_single(fc, gadgetfs_fill_super);
2093 }
2094
2095 static const struct fs_context_operations gadgetfs_context_ops = {
2096 .get_tree = gadgetfs_get_tree,
2097 };
2098
2099 static int gadgetfs_init_fs_context(struct fs_context *fc)
2100 {
2101 fc->ops = &gadgetfs_context_ops;
2102 return 0;
2103 }
2104
2105 static void
2106 gadgetfs_kill_sb (struct super_block *sb)
2107 {
2108 mutex_lock(&sb_mutex);
2109 kill_litter_super (sb);
2110 if (the_device) {
2111 put_dev (the_device);
2112 the_device = NULL;
2113 }
2114 kfree(CHIP);
2115 CHIP = NULL;
2116 mutex_unlock(&sb_mutex);
2117 }
2118
2119 /*----------------------------------------------------------------------*/
2120
2121 static struct file_system_type gadgetfs_type = {
2122 .owner = THIS_MODULE,
2123 .name = shortname,
2124 .init_fs_context = gadgetfs_init_fs_context,
2125 .kill_sb = gadgetfs_kill_sb,
2126 };
2127 MODULE_ALIAS_FS("gadgetfs");
2128
2129 /*----------------------------------------------------------------------*/
2130
2131 static int __init gadgetfs_init (void)
2132 {
2133 int status;
2134
2135 status = register_filesystem (&gadgetfs_type);
2136 if (status == 0)
2137 pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2138 shortname, driver_desc);
2139 return status;
2140 }
2141 module_init (gadgetfs_init);
2142
2143 static void __exit gadgetfs_cleanup (void)
2144 {
2145 pr_debug ("unregister %s\n", shortname);
2146 unregister_filesystem (&gadgetfs_type);
2147 }
2148 module_exit (gadgetfs_cleanup);
2149
2150