xref: /linux/drivers/usb/gadget/legacy/inode.c (revision 7cd122b55283d3ceef71a5b723ccaa03a72284b4)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * inode.c -- user mode filesystem api for usb gadget controllers
4  *
5  * Copyright (C) 2003-2004 David Brownell
6  * Copyright (C) 2003 Agilent Technologies
7  */
8 
9 
10 /* #define VERBOSE_DEBUG */
11 
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/fs_context.h>
16 #include <linux/pagemap.h>
17 #include <linux/uts.h>
18 #include <linux/wait.h>
19 #include <linux/compiler.h>
20 #include <linux/uaccess.h>
21 #include <linux/sched.h>
22 #include <linux/slab.h>
23 #include <linux/string_choices.h>
24 #include <linux/poll.h>
25 #include <linux/kthread.h>
26 #include <linux/aio.h>
27 #include <linux/uio.h>
28 #include <linux/refcount.h>
29 #include <linux/delay.h>
30 #include <linux/device.h>
31 #include <linux/moduleparam.h>
32 
33 #include <linux/usb/gadgetfs.h>
34 #include <linux/usb/gadget.h>
35 #include <linux/usb/composite.h> /* for USB_GADGET_DELAYED_STATUS */
36 
37 /* Undef helpers from linux/usb/composite.h as gadgetfs redefines them */
38 #undef DBG
39 #undef ERROR
40 #undef INFO
41 
42 
43 /*
44  * The gadgetfs API maps each endpoint to a file descriptor so that you
45  * can use standard synchronous read/write calls for I/O.  There's some
46  * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
47  * drivers show how this works in practice.  You can also use AIO to
48  * eliminate I/O gaps between requests, to help when streaming data.
49  *
50  * Key parts that must be USB-specific are protocols defining how the
51  * read/write operations relate to the hardware state machines.  There
52  * are two types of files.  One type is for the device, implementing ep0.
53  * The other type is for each IN or OUT endpoint.  In both cases, the
54  * user mode driver must configure the hardware before using it.
55  *
56  * - First, dev_config() is called when /dev/gadget/$CHIP is configured
57  *   (by writing configuration and device descriptors).  Afterwards it
58  *   may serve as a source of device events, used to handle all control
59  *   requests other than basic enumeration.
60  *
61  * - Then, after a SET_CONFIGURATION control request, ep_config() is
62  *   called when each /dev/gadget/ep* file is configured (by writing
63  *   endpoint descriptors).  Afterwards these files are used to write()
64  *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
65  *   direction" request is issued (like reading an IN endpoint).
66  *
67  * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
68  * not possible on all hardware.  For example, precise fault handling with
69  * respect to data left in endpoint fifos after aborted operations; or
70  * selective clearing of endpoint halts, to implement SET_INTERFACE.
71  */
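/* A minimal user-space sketch of that lifecycle (illustrative only; the
 * exact descriptor blobs and endpoint names depend on the controller and
 * on the gadget being implemented):
 *
 *	fd = open("/dev/gadget/$CHIP", O_RDWR);
 *	write(fd, descriptors, sizeof descriptors);	// tag 0 + config(s) + device
 *	for (;;) {
 *		struct usb_gadgetfs_event event;
 *
 *		read(fd, &event, sizeof event);		// CONNECT, SETUP, ...
 *		// after SET_CONFIGURATION: open /dev/gadget/ep* files, write
 *		// their descriptors, then read()/write() them for data
 *	}
 */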
72 
73 #define	DRIVER_DESC	"USB Gadget filesystem"
74 #define	DRIVER_VERSION	"24 Aug 2004"
75 
76 static const char driver_desc [] = DRIVER_DESC;
77 static const char shortname [] = "gadgetfs";
78 
79 MODULE_DESCRIPTION (DRIVER_DESC);
80 MODULE_AUTHOR ("David Brownell");
81 MODULE_LICENSE ("GPL");
82 
83 static int ep_open(struct inode *, struct file *);
84 
85 
86 /*----------------------------------------------------------------------*/
87 
88 #define GADGETFS_MAGIC		0xaee71ee7
89 
90 /* /dev/gadget/$CHIP represents ep0 and the whole device */
91 enum ep0_state {
92 	/* DISABLED is the initial state. */
93 	STATE_DEV_DISABLED = 0,
94 
95 	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
96 	 * ep0/device i/o modes and binding to the controller.  Driver
97 	 * must always write descriptors to initialize the device, then
98 	 * the device becomes UNCONNECTED until enumeration.
99 	 */
100 	STATE_DEV_OPENED,
101 
102 	/* From then on, ep0 fd is in either of two basic modes:
103 	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
104 	 * - SETUP: read/write will transfer control data and succeed;
105 	 *   or if "wrong direction", performs protocol stall
106 	 */
107 	STATE_DEV_UNCONNECTED,
108 	STATE_DEV_CONNECTED,
109 	STATE_DEV_SETUP,
110 
111 	/* UNBOUND means the driver closed ep0, so the device won't be
112 	 * accessible again (DEV_DISABLED) until all fds are closed.
113 	 */
114 	STATE_DEV_UNBOUND,
115 };
116 
117 /* enough for the whole queue: most events invalidate others */
118 #define	N_EVENT			5
119 
120 #define RBUF_SIZE		256
121 
122 struct dev_data {
123 	spinlock_t			lock;
124 	refcount_t			count;
125 	int				udc_usage;
126 	enum ep0_state			state;		/* P: lock */
127 	struct usb_gadgetfs_event	event [N_EVENT];
128 	unsigned			ev_next;
129 	struct fasync_struct		*fasync;
130 	u8				current_config;
131 
132 	/* drivers reading ep0 MUST handle control requests (SETUP)
133 	 * reported that way; else the host will time out.
134 	 */
135 	unsigned			usermode_setup : 1,
136 					setup_in : 1,
137 					setup_can_stall : 1,
138 					setup_out_ready : 1,
139 					setup_out_error : 1,
140 					setup_abort : 1,
141 					gadget_registered : 1;
142 	unsigned			setup_wLength;
143 
144 	/* the rest is basically write-once */
145 	struct usb_config_descriptor	*config, *hs_config;
146 	struct usb_device_descriptor	*dev;
147 	struct usb_request		*req;
148 	struct usb_gadget		*gadget;
149 	struct list_head		epfiles;
150 	void				*buf;
151 	wait_queue_head_t		wait;
152 	struct super_block		*sb;
153 
154 	/* except this scratch i/o buffer for ep0 */
155 	u8				rbuf[RBUF_SIZE];
156 };
157 
158 static inline void get_dev (struct dev_data *data)
159 {
160 	refcount_inc (&data->count);
161 }
162 
163 static void put_dev (struct dev_data *data)
164 {
165 	if (likely (!refcount_dec_and_test (&data->count)))
166 		return;
167 	/* needs no more cleanup */
168 	BUG_ON (waitqueue_active (&data->wait));
169 	kfree (data);
170 }
171 
172 static struct dev_data *dev_new (void)
173 {
174 	struct dev_data		*dev;
175 
176 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
177 	if (!dev)
178 		return NULL;
179 	dev->state = STATE_DEV_DISABLED;
180 	refcount_set (&dev->count, 1);
181 	spin_lock_init (&dev->lock);
182 	INIT_LIST_HEAD (&dev->epfiles);
183 	init_waitqueue_head (&dev->wait);
184 	return dev;
185 }
186 
187 /*----------------------------------------------------------------------*/
188 
189 /* other /dev/gadget/$ENDPOINT files represent endpoints */
190 enum ep_state {
191 	STATE_EP_DISABLED = 0,
192 	STATE_EP_READY,
193 	STATE_EP_ENABLED,
194 	STATE_EP_UNBOUND,
195 };
196 
197 struct ep_data {
198 	struct mutex			lock;
199 	enum ep_state			state;
200 	refcount_t			count;
201 	struct dev_data			*dev;
202 	/* must hold dev->lock before accessing ep or req */
203 	struct usb_ep			*ep;
204 	struct usb_request		*req;
205 	ssize_t				status;
206 	char				name [16];
207 	struct usb_endpoint_descriptor	desc, hs_desc;
208 	struct list_head		epfiles;
209 	wait_queue_head_t		wait;
210 };
211 
212 static inline void get_ep (struct ep_data *data)
213 {
214 	refcount_inc (&data->count);
215 }
216 
217 static void put_ep (struct ep_data *data)
218 {
219 	if (likely (!refcount_dec_and_test (&data->count)))
220 		return;
221 	put_dev (data->dev);
222 	/* needs no more cleanup */
223 	BUG_ON (!list_empty (&data->epfiles));
224 	BUG_ON (waitqueue_active (&data->wait));
225 	kfree (data);
226 }
227 
228 /*----------------------------------------------------------------------*/
229 
230 /* most "how to use the hardware" policy choices are in userspace:
231  * mapping endpoint roles (which the driver needs) to the capabilities
232  * which the usb controller has.  most of those capabilities are exposed
233  * implicitly, starting with the driver name and then endpoint names.
234  */
235 
236 static const char *CHIP;
237 static DEFINE_MUTEX(sb_mutex);		/* Serialize superblock operations */
238 
239 /*----------------------------------------------------------------------*/
240 
241 /* NOTE:  don't use dev_printk calls before binding to the gadget
242  * at the end of ep0 configuration, or after unbind.
243  */
244 
245 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
246 #define xprintk(d,level,fmt,args...) \
247 	printk(level "%s: " fmt , shortname , ## args)
248 
249 #ifdef DEBUG
250 #define DBG(dev,fmt,args...) \
251 	xprintk(dev , KERN_DEBUG , fmt , ## args)
252 #else
253 #define DBG(dev,fmt,args...) \
254 	do { } while (0)
255 #endif /* DEBUG */
256 
257 #ifdef VERBOSE_DEBUG
258 #define VDEBUG	DBG
259 #else
260 #define VDEBUG(dev,fmt,args...) \
261 	do { } while (0)
262 #endif /* DEBUG */
263 
264 #define ERROR(dev,fmt,args...) \
265 	xprintk(dev , KERN_ERR , fmt , ## args)
266 #define INFO(dev,fmt,args...) \
267 	xprintk(dev , KERN_INFO , fmt , ## args)
268 
269 
270 /*----------------------------------------------------------------------*/
271 
272 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
273  *
274  * After opening, configure non-control endpoints.  Then use normal
275  * stream read() and write() requests; and maybe ioctl() to get more
276  * precise FIFO status when recovering from cancellation.
277  */
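/* A hedged user-space sketch of synchronous I/O on an already-configured
 * endpoint file ("ep1in-bulk" is a made-up name; real names come from the
 * controller driver):
 *
 *	int fd = open("/dev/gadget/ep1in-bulk", O_RDWR);
 *	... write the endpoint descriptors first (see ep_config() below) ...
 *	ssize_t n = write(fd, buf, len);		// queues one IN transfer, blocks
 *	if (n < 0 && errno == EINTR)			// transfer was interrupted
 *		ioctl(fd, GADGETFS_FIFO_STATUS);	// bytes left in the fifo
 */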
278 
279 static void epio_complete (struct usb_ep *ep, struct usb_request *req)
280 {
281 	struct ep_data	*epdata = ep->driver_data;
282 
283 	if (!req->context)
284 		return;
285 	if (req->status)
286 		epdata->status = req->status;
287 	else
288 		epdata->status = req->actual;
289 	complete ((struct completion *)req->context);
290 }
291 
292 /* lock the endpoint (task context), returning once it's usable for i/o.
293  * still need dev->lock to use epdata->ep.
294  */
295 static int
296 get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
297 {
298 	int	val;
299 
300 	if (f_flags & O_NONBLOCK) {
301 		if (!mutex_trylock(&epdata->lock))
302 			goto nonblock;
303 		if (epdata->state != STATE_EP_ENABLED &&
304 		    (!is_write || epdata->state != STATE_EP_READY)) {
305 			mutex_unlock(&epdata->lock);
306 nonblock:
307 			val = -EAGAIN;
308 		} else
309 			val = 0;
310 		return val;
311 	}
312 
313 	val = mutex_lock_interruptible(&epdata->lock);
314 	if (val < 0)
315 		return val;
316 
317 	switch (epdata->state) {
318 	case STATE_EP_ENABLED:
319 		return 0;
320 	case STATE_EP_READY:			/* not configured yet */
321 		if (is_write)
322 			return 0;
323 		fallthrough;
324 	case STATE_EP_UNBOUND:			/* clean disconnect */
325 		break;
326 	// case STATE_EP_DISABLED:		/* "can't happen" */
327 	default:				/* error! */
328 		pr_debug ("%s: ep %p not available, state %d\n",
329 				shortname, epdata, epdata->state);
330 	}
331 	mutex_unlock(&epdata->lock);
332 	return -ENODEV;
333 }
334 
335 static ssize_t
336 ep_io (struct ep_data *epdata, void *buf, unsigned len)
337 {
338 	DECLARE_COMPLETION_ONSTACK (done);
339 	int value;
340 
341 	spin_lock_irq (&epdata->dev->lock);
342 	if (likely (epdata->ep != NULL)) {
343 		struct usb_request	*req = epdata->req;
344 
345 		req->context = &done;
346 		req->complete = epio_complete;
347 		req->buf = buf;
348 		req->length = len;
349 		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
350 	} else
351 		value = -ENODEV;
352 	spin_unlock_irq (&epdata->dev->lock);
353 
354 	if (likely (value == 0)) {
355 		value = wait_for_completion_interruptible(&done);
356 		if (value != 0) {
357 			spin_lock_irq (&epdata->dev->lock);
358 			if (likely (epdata->ep != NULL)) {
359 				DBG (epdata->dev, "%s i/o interrupted\n",
360 						epdata->name);
361 				usb_ep_dequeue (epdata->ep, epdata->req);
362 				spin_unlock_irq (&epdata->dev->lock);
363 
364 				wait_for_completion(&done);
365 				if (epdata->status == -ECONNRESET)
366 					epdata->status = -EINTR;
367 			} else {
368 				spin_unlock_irq (&epdata->dev->lock);
369 
370 				DBG (epdata->dev, "endpoint gone\n");
371 				wait_for_completion(&done);
372 				epdata->status = -ENODEV;
373 			}
374 		}
375 		return epdata->status;
376 	}
377 	return value;
378 }
379 
380 static int
381 ep_release (struct inode *inode, struct file *fd)
382 {
383 	struct ep_data		*data = fd->private_data;
384 	int value;
385 
386 	value = mutex_lock_interruptible(&data->lock);
387 	if (value < 0)
388 		return value;
389 
390 	/* clean up if this can be reopened */
391 	if (data->state != STATE_EP_UNBOUND) {
392 		data->state = STATE_EP_DISABLED;
393 		data->desc.bDescriptorType = 0;
394 		data->hs_desc.bDescriptorType = 0;
395 		usb_ep_disable(data->ep);
396 	}
397 	mutex_unlock(&data->lock);
398 	put_ep (data);
399 	return 0;
400 }
401 
402 static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
403 {
404 	struct ep_data		*data = fd->private_data;
405 	int			status;
406 
407 	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
408 		return status;
409 
410 	spin_lock_irq (&data->dev->lock);
411 	if (likely (data->ep != NULL)) {
412 		switch (code) {
413 		case GADGETFS_FIFO_STATUS:
414 			status = usb_ep_fifo_status (data->ep);
415 			break;
416 		case GADGETFS_FIFO_FLUSH:
417 			usb_ep_fifo_flush (data->ep);
418 			break;
419 		case GADGETFS_CLEAR_HALT:
420 			status = usb_ep_clear_halt (data->ep);
421 			break;
422 		default:
423 			status = -ENOTTY;
424 		}
425 	} else
426 		status = -ENODEV;
427 	spin_unlock_irq (&data->dev->lock);
428 	mutex_unlock(&data->lock);
429 	return status;
430 }
431 
432 /*----------------------------------------------------------------------*/
433 
434 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
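/* A hedged sketch of queueing asynchronous OUT reads with libaio (assumes
 * libaio is available and ep_fd is an already-configured endpoint file;
 * error handling omitted):
 *
 *	io_context_t ctx = 0;
 *	struct iocb cb, *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	io_setup(8, &ctx);
 *	io_prep_pread(&cb, ep_fd, buf, sizeof buf, 0);	// offset is ignored
 *	io_submit(ctx, 1, cbs);			// ends up in ep_read_iter()
 *	io_getevents(ctx, 1, 1, &ev, NULL);	// completion via ep_aio_complete()
 */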
435 
436 struct kiocb_priv {
437 	struct usb_request	*req;
438 	struct ep_data		*epdata;
439 	struct kiocb		*iocb;
440 	struct mm_struct	*mm;
441 	struct work_struct	work;
442 	void			*buf;
443 	struct iov_iter		to;
444 	const void		*to_free;
445 	unsigned		actual;
446 };
447 
448 static int ep_aio_cancel(struct kiocb *iocb)
449 {
450 	struct kiocb_priv	*priv = iocb->private;
451 	struct ep_data		*epdata;
452 	int			value;
453 
454 	local_irq_disable();
455 	epdata = priv->epdata;
456 	// spin_lock(&epdata->dev->lock);
457 	if (likely(epdata && epdata->ep && priv->req))
458 		value = usb_ep_dequeue (epdata->ep, priv->req);
459 	else
460 		value = -EINVAL;
461 	// spin_unlock(&epdata->dev->lock);
462 	local_irq_enable();
463 
464 	return value;
465 }
466 
467 static void ep_user_copy_worker(struct work_struct *work)
468 {
469 	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
470 	struct mm_struct *mm = priv->mm;
471 	struct kiocb *iocb = priv->iocb;
472 	size_t ret;
473 
474 	kthread_use_mm(mm);
475 	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
476 	kthread_unuse_mm(mm);
477 	if (!ret)
478 		ret = -EFAULT;
479 
480 	/* completing the iocb can drop the ctx and mm, don't touch mm after */
481 	iocb->ki_complete(iocb, ret);
482 
483 	kfree(priv->buf);
484 	kfree(priv->to_free);
485 	kfree(priv);
486 }
487 
488 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
489 {
490 	struct kiocb		*iocb = req->context;
491 	struct kiocb_priv	*priv = iocb->private;
492 	struct ep_data		*epdata = priv->epdata;
493 
494 	/* lock against disconnect (and ideally, cancel) */
495 	spin_lock(&epdata->dev->lock);
496 	priv->req = NULL;
497 	priv->epdata = NULL;
498 
499 	/* if this was a write or a read returning no data then we
500 	 * don't need to copy anything to userspace, so we can
501 	 * complete the aio request immediately.
502 	 */
503 	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
504 		kfree(req->buf);
505 		kfree(priv->to_free);
506 		kfree(priv);
507 		iocb->private = NULL;
508 		iocb->ki_complete(iocb,
509 				req->actual ? req->actual : (long)req->status);
510 	} else {
511 		/* ep_copy_to_user() won't report both; we hide some faults */
512 		if (unlikely(0 != req->status))
513 			DBG(epdata->dev, "%s fault %d len %d\n",
514 				ep->name, req->status, req->actual);
515 
516 		priv->buf = req->buf;
517 		priv->actual = req->actual;
518 		INIT_WORK(&priv->work, ep_user_copy_worker);
519 		schedule_work(&priv->work);
520 	}
521 
522 	usb_ep_free_request(ep, req);
523 	spin_unlock(&epdata->dev->lock);
524 	put_ep(epdata);
525 }
526 
527 static ssize_t ep_aio(struct kiocb *iocb,
528 		      struct kiocb_priv *priv,
529 		      struct ep_data *epdata,
530 		      char *buf,
531 		      size_t len)
532 {
533 	struct usb_request *req;
534 	ssize_t value;
535 
536 	iocb->private = priv;
537 	priv->iocb = iocb;
538 
539 	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
540 	get_ep(epdata);
541 	priv->epdata = epdata;
542 	priv->actual = 0;
543 	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
544 
545 	/* each kiocb is coupled to one usb_request, but we can't
546 	 * allocate or submit those if the host disconnected.
547 	 */
548 	spin_lock_irq(&epdata->dev->lock);
549 	value = -ENODEV;
550 	if (unlikely(epdata->ep == NULL))
551 		goto fail;
552 
553 	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
554 	value = -ENOMEM;
555 	if (unlikely(!req))
556 		goto fail;
557 
558 	priv->req = req;
559 	req->buf = buf;
560 	req->length = len;
561 	req->complete = ep_aio_complete;
562 	req->context = iocb;
563 	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
564 	if (unlikely(0 != value)) {
565 		usb_ep_free_request(epdata->ep, req);
566 		goto fail;
567 	}
568 	spin_unlock_irq(&epdata->dev->lock);
569 	return -EIOCBQUEUED;
570 
571 fail:
572 	spin_unlock_irq(&epdata->dev->lock);
573 	kfree(priv->to_free);
574 	kfree(priv);
575 	put_ep(epdata);
576 	return value;
577 }
578 
579 static ssize_t
580 ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
581 {
582 	struct file *file = iocb->ki_filp;
583 	struct ep_data *epdata = file->private_data;
584 	size_t len = iov_iter_count(to);
585 	ssize_t value;
586 	char *buf;
587 
588 	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
589 		return value;
590 
591 	/* halt any endpoint by doing a "wrong direction" i/o call */
592 	if (usb_endpoint_dir_in(&epdata->desc)) {
593 		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
594 		    !is_sync_kiocb(iocb)) {
595 			mutex_unlock(&epdata->lock);
596 			return -EINVAL;
597 		}
598 		DBG (epdata->dev, "%s halt\n", epdata->name);
599 		spin_lock_irq(&epdata->dev->lock);
600 		if (likely(epdata->ep != NULL))
601 			usb_ep_set_halt(epdata->ep);
602 		spin_unlock_irq(&epdata->dev->lock);
603 		mutex_unlock(&epdata->lock);
604 		return -EBADMSG;
605 	}
606 
607 	buf = kmalloc(len, GFP_KERNEL);
608 	if (unlikely(!buf)) {
609 		mutex_unlock(&epdata->lock);
610 		return -ENOMEM;
611 	}
612 	if (is_sync_kiocb(iocb)) {
613 		value = ep_io(epdata, buf, len);
614 		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
615 			value = -EFAULT;
616 	} else {
617 		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
618 		value = -ENOMEM;
619 		if (!priv)
620 			goto fail;
621 		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
622 		if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
623 			kfree(priv);
624 			goto fail;
625 		}
626 		value = ep_aio(iocb, priv, epdata, buf, len);
627 		if (value == -EIOCBQUEUED)
628 			buf = NULL;
629 	}
630 fail:
631 	kfree(buf);
632 	mutex_unlock(&epdata->lock);
633 	return value;
634 }
635 
636 static ssize_t ep_config(struct ep_data *, const char *, size_t);
637 
638 static ssize_t
639 ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
640 {
641 	struct file *file = iocb->ki_filp;
642 	struct ep_data *epdata = file->private_data;
643 	size_t len = iov_iter_count(from);
644 	bool configured;
645 	ssize_t value;
646 	char *buf;
647 
648 	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
649 		return value;
650 
651 	configured = epdata->state == STATE_EP_ENABLED;
652 
653 	/* halt any endpoint by doing a "wrong direction" i/o call */
654 	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
655 		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
656 		    !is_sync_kiocb(iocb)) {
657 			mutex_unlock(&epdata->lock);
658 			return -EINVAL;
659 		}
660 		DBG (epdata->dev, "%s halt\n", epdata->name);
661 		spin_lock_irq(&epdata->dev->lock);
662 		if (likely(epdata->ep != NULL))
663 			usb_ep_set_halt(epdata->ep);
664 		spin_unlock_irq(&epdata->dev->lock);
665 		mutex_unlock(&epdata->lock);
666 		return -EBADMSG;
667 	}
668 
669 	buf = kmalloc(len, GFP_KERNEL);
670 	if (unlikely(!buf)) {
671 		mutex_unlock(&epdata->lock);
672 		return -ENOMEM;
673 	}
674 
675 	if (unlikely(!copy_from_iter_full(buf, len, from))) {
676 		value = -EFAULT;
677 		goto out;
678 	}
679 
680 	if (unlikely(!configured)) {
681 		value = ep_config(epdata, buf, len);
682 	} else if (is_sync_kiocb(iocb)) {
683 		value = ep_io(epdata, buf, len);
684 	} else {
685 		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
686 		value = -ENOMEM;
687 		if (priv) {
688 			value = ep_aio(iocb, priv, epdata, buf, len);
689 			if (value == -EIOCBQUEUED)
690 				buf = NULL;
691 		}
692 	}
693 out:
694 	kfree(buf);
695 	mutex_unlock(&epdata->lock);
696 	return value;
697 }
698 
699 /*----------------------------------------------------------------------*/
700 
701 /* used after endpoint configuration */
702 static const struct file_operations ep_io_operations = {
703 	.owner =	THIS_MODULE,
704 
705 	.open =		ep_open,
706 	.release =	ep_release,
707 	.unlocked_ioctl = ep_ioctl,
708 	.read_iter =	ep_read_iter,
709 	.write_iter =	ep_write_iter,
710 };
711 
712 /* ENDPOINT INITIALIZATION
713  *
714  *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
715  *     status = write (fd, descriptors, sizeof descriptors)
716  *
717  * That write establishes the endpoint configuration, configuring
718  * the controller to process bulk, interrupt, or isochronous transfers
719  * at the right maxpacket size, and so on.
720  *
721  * The descriptors are message type 1, identified by a host order u32
722  * at the beginning of what's written.  Descriptor order is: full/low
723  * speed descriptor, then optional high speed descriptor.
724  */
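/*
 * A hedged sketch of composing that write in user space (descriptor field
 * values are placeholders and must match the configuration written to ep0):
 *
 *	struct usb_endpoint_descriptor fs = { ... }, hs = { ... };
 *	__u32 tag = 1;
 *	char msg[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *
 *	memcpy(msg, &tag, 4);
 *	memcpy(msg + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *	memcpy(msg + 4 + USB_DT_ENDPOINT_SIZE, &hs, USB_DT_ENDPOINT_SIZE);
 *	write(ep_fd, msg, sizeof msg);	// drop the hs copy on full-speed-only hardware
 */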
725 static ssize_t
726 ep_config (struct ep_data *data, const char *buf, size_t len)
727 {
728 	struct usb_ep		*ep;
729 	u32			tag;
730 	int			value, length = len;
731 
732 	if (data->state != STATE_EP_READY) {
733 		value = -EL2HLT;
734 		goto fail;
735 	}
736 
737 	value = len;
738 	if (len < USB_DT_ENDPOINT_SIZE + 4)
739 		goto fail0;
740 
741 	/* we might need to change message format someday */
742 	memcpy(&tag, buf, 4);
743 	if (tag != 1) {
744 		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
745 		goto fail0;
746 	}
747 	buf += 4;
748 	len -= 4;
749 
750 	/* NOTE:  audio endpoint extensions not accepted here;
751 	 * just don't include the extra bytes.
752 	 */
753 
754 	/* full/low speed descriptor, then high speed */
755 	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
756 	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
757 			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
758 		goto fail0;
759 	if (len != USB_DT_ENDPOINT_SIZE) {
760 		if (len != 2 * USB_DT_ENDPOINT_SIZE)
761 			goto fail0;
762 		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
763 			USB_DT_ENDPOINT_SIZE);
764 		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
765 				|| data->hs_desc.bDescriptorType
766 					!= USB_DT_ENDPOINT) {
767 			DBG(data->dev, "config %s, bad hs length or type\n",
768 					data->name);
769 			goto fail0;
770 		}
771 	}
772 
773 	spin_lock_irq (&data->dev->lock);
774 	if (data->dev->state == STATE_DEV_UNBOUND) {
775 		value = -ENOENT;
776 		goto gone;
777 	} else {
778 		ep = data->ep;
779 		if (ep == NULL) {
780 			value = -ENODEV;
781 			goto gone;
782 		}
783 	}
784 	switch (data->dev->gadget->speed) {
785 	case USB_SPEED_LOW:
786 	case USB_SPEED_FULL:
787 		ep->desc = &data->desc;
788 		break;
789 	case USB_SPEED_HIGH:
790 		/* fails if caller didn't provide that descriptor... */
791 		ep->desc = &data->hs_desc;
792 		break;
793 	default:
794 		DBG(data->dev, "unconnected, %s init abandoned\n",
795 				data->name);
796 		value = -EINVAL;
797 		goto gone;
798 	}
799 	value = usb_ep_enable(ep);
800 	if (value == 0) {
801 		data->state = STATE_EP_ENABLED;
802 		value = length;
803 	}
804 gone:
805 	spin_unlock_irq (&data->dev->lock);
806 	if (value < 0) {
807 fail:
808 		data->desc.bDescriptorType = 0;
809 		data->hs_desc.bDescriptorType = 0;
810 	}
811 	return value;
812 fail0:
813 	value = -EINVAL;
814 	goto fail;
815 }
816 
817 static int
818 ep_open (struct inode *inode, struct file *fd)
819 {
820 	struct ep_data		*data = inode->i_private;
821 	int			value = -EBUSY;
822 
823 	if (mutex_lock_interruptible(&data->lock) != 0)
824 		return -EINTR;
825 	spin_lock_irq (&data->dev->lock);
826 	if (data->dev->state == STATE_DEV_UNBOUND)
827 		value = -ENOENT;
828 	else if (data->state == STATE_EP_DISABLED) {
829 		value = 0;
830 		data->state = STATE_EP_READY;
831 		get_ep (data);
832 		fd->private_data = data;
833 		VDEBUG (data->dev, "%s ready\n", data->name);
834 	} else
835 		DBG (data->dev, "%s state %d\n",
836 			data->name, data->state);
837 	spin_unlock_irq (&data->dev->lock);
838 	mutex_unlock(&data->lock);
839 	return value;
840 }
841 
842 /*----------------------------------------------------------------------*/
843 
844 /* EP0 IMPLEMENTATION can be partly in userspace.
845  *
846  * Drivers that use this facility receive various events, including
847  * control requests the kernel doesn't handle.  Drivers that don't
848  * use this facility may be too simple-minded for real applications.
849  */
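/* A hedged sketch of the user-space side of a delegated control request
 * (illustrative; a real driver also handles CONNECT/DISCONNECT/SUSPEND and
 * validates the request itself):
 *
 *	struct usb_gadgetfs_event event;
 *
 *	read(ep0_fd, &event, sizeof event);
 *	if (event.type == GADGETFS_SETUP) {
 *		if (event.u.setup.bRequestType & USB_DIR_IN)
 *			write(ep0_fd, reply, len);	// send the IN data stage
 *		else
 *			read(ep0_fd, buf, len);		// collect OUT data (or ack with len 0)
 *		// an i/o call in the "wrong direction" stalls ep0 instead
 *	}
 */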
850 
851 static inline void ep0_readable (struct dev_data *dev)
852 {
853 	wake_up (&dev->wait);
854 	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
855 }
856 
857 static void clean_req (struct usb_ep *ep, struct usb_request *req)
858 {
859 	struct dev_data		*dev = ep->driver_data;
860 
861 	if (req->buf != dev->rbuf) {
862 		kfree(req->buf);
863 		req->buf = dev->rbuf;
864 	}
865 	req->complete = epio_complete;
866 	dev->setup_out_ready = 0;
867 }
868 
869 static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
870 {
871 	struct dev_data		*dev = ep->driver_data;
872 	unsigned long		flags;
873 	int			free = 1;
874 
875 	/* for control OUT, data must still get to userspace */
876 	spin_lock_irqsave(&dev->lock, flags);
877 	if (!dev->setup_in) {
878 		dev->setup_out_error = (req->status != 0);
879 		if (!dev->setup_out_error)
880 			free = 0;
881 		dev->setup_out_ready = 1;
882 		ep0_readable (dev);
883 	}
884 
885 	/* clean up as appropriate */
886 	if (free && req->buf != &dev->rbuf)
887 		clean_req (ep, req);
888 	req->complete = epio_complete;
889 	spin_unlock_irqrestore(&dev->lock, flags);
890 }
891 
892 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
893 {
894 	struct dev_data	*dev = ep->driver_data;
895 
896 	if (dev->setup_out_ready) {
897 		DBG (dev, "ep0 request busy!\n");
898 		return -EBUSY;
899 	}
900 	if (len > sizeof (dev->rbuf))
901 		req->buf = kmalloc(len, GFP_ATOMIC);
902 	if (req->buf == NULL) {
903 		req->buf = dev->rbuf;
904 		return -ENOMEM;
905 	}
906 	req->complete = ep0_complete;
907 	req->length = len;
908 	req->zero = 0;
909 	return 0;
910 }
911 
912 static ssize_t
913 ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
914 {
915 	struct dev_data			*dev = fd->private_data;
916 	ssize_t				retval;
917 	enum ep0_state			state;
918 
919 	spin_lock_irq (&dev->lock);
920 	if (dev->state <= STATE_DEV_OPENED) {
921 		retval = -EINVAL;
922 		goto done;
923 	}
924 
925 	/* report fd mode change before acting on it */
926 	if (dev->setup_abort) {
927 		dev->setup_abort = 0;
928 		retval = -EIDRM;
929 		goto done;
930 	}
931 
932 	/* control DATA stage */
933 	if ((state = dev->state) == STATE_DEV_SETUP) {
934 
935 		if (dev->setup_in) {		/* stall IN */
936 			VDEBUG(dev, "ep0in stall\n");
937 			(void) usb_ep_set_halt (dev->gadget->ep0);
938 			retval = -EL2HLT;
939 			dev->state = STATE_DEV_CONNECTED;
940 
941 		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
942 			struct usb_ep		*ep = dev->gadget->ep0;
943 			struct usb_request	*req = dev->req;
944 
945 			if ((retval = setup_req (ep, req, 0)) == 0) {
946 				++dev->udc_usage;
947 				spin_unlock_irq (&dev->lock);
948 				retval = usb_ep_queue (ep, req, GFP_KERNEL);
949 				spin_lock_irq (&dev->lock);
950 				--dev->udc_usage;
951 			}
952 			dev->state = STATE_DEV_CONNECTED;
953 
954 			/* assume that was SET_CONFIGURATION */
955 			if (dev->current_config) {
956 				unsigned power;
957 
958 				if (gadget_is_dualspeed(dev->gadget)
959 						&& (dev->gadget->speed
960 							== USB_SPEED_HIGH))
961 					power = dev->hs_config->bMaxPower;
962 				else
963 					power = dev->config->bMaxPower;
964 				usb_gadget_vbus_draw(dev->gadget, 2 * power);
965 			}
966 
967 		} else {			/* collect OUT data */
968 			if ((fd->f_flags & O_NONBLOCK) != 0
969 					&& !dev->setup_out_ready) {
970 				retval = -EAGAIN;
971 				goto done;
972 			}
973 			spin_unlock_irq (&dev->lock);
974 			retval = wait_event_interruptible (dev->wait,
975 					dev->setup_out_ready != 0);
976 
977 			/* FIXME state could change from under us */
978 			spin_lock_irq (&dev->lock);
979 			if (retval)
980 				goto done;
981 
982 			if (dev->state != STATE_DEV_SETUP) {
983 				retval = -ECANCELED;
984 				goto done;
985 			}
986 			dev->state = STATE_DEV_CONNECTED;
987 
988 			if (dev->setup_out_error)
989 				retval = -EIO;
990 			else {
991 				len = min (len, (size_t)dev->req->actual);
992 				++dev->udc_usage;
993 				spin_unlock_irq(&dev->lock);
994 				if (copy_to_user (buf, dev->req->buf, len))
995 					retval = -EFAULT;
996 				else
997 					retval = len;
998 				spin_lock_irq(&dev->lock);
999 				--dev->udc_usage;
1000 				clean_req (dev->gadget->ep0, dev->req);
1001 				/* NOTE userspace can't yet choose to stall */
1002 			}
1003 		}
1004 		goto done;
1005 	}
1006 
1007 	/* else normal: return event data */
1008 	if (len < sizeof dev->event [0]) {
1009 		retval = -EINVAL;
1010 		goto done;
1011 	}
1012 	len -= len % sizeof (struct usb_gadgetfs_event);
1013 	dev->usermode_setup = 1;
1014 
1015 scan:
1016 	/* return queued events right away */
1017 	if (dev->ev_next != 0) {
1018 		unsigned		i, n;
1019 
1020 		n = len / sizeof (struct usb_gadgetfs_event);
1021 		if (dev->ev_next < n)
1022 			n = dev->ev_next;
1023 
1024 		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
1025 		for (i = 0; i < n; i++) {
1026 			if (dev->event [i].type == GADGETFS_SETUP) {
1027 				dev->state = STATE_DEV_SETUP;
1028 				n = i + 1;
1029 				break;
1030 			}
1031 		}
1032 		spin_unlock_irq (&dev->lock);
1033 		len = n * sizeof (struct usb_gadgetfs_event);
1034 		if (copy_to_user (buf, &dev->event, len))
1035 			retval = -EFAULT;
1036 		else
1037 			retval = len;
1038 		if (len > 0) {
1039 			/* NOTE this doesn't guard against broken drivers;
1040 			 * concurrent ep0 readers may lose events.
1041 			 */
1042 			spin_lock_irq (&dev->lock);
1043 			if (dev->ev_next > n) {
1044 				memmove(&dev->event[0], &dev->event[n],
1045 					sizeof (struct usb_gadgetfs_event)
1046 						* (dev->ev_next - n));
1047 			}
1048 			dev->ev_next -= n;
1049 			spin_unlock_irq (&dev->lock);
1050 		}
1051 		return retval;
1052 	}
1053 	if (fd->f_flags & O_NONBLOCK) {
1054 		retval = -EAGAIN;
1055 		goto done;
1056 	}
1057 
1058 	switch (state) {
1059 	default:
1060 		DBG (dev, "fail %s, state %d\n", __func__, state);
1061 		retval = -ESRCH;
1062 		break;
1063 	case STATE_DEV_UNCONNECTED:
1064 	case STATE_DEV_CONNECTED:
1065 		spin_unlock_irq (&dev->lock);
1066 		DBG (dev, "%s wait\n", __func__);
1067 
1068 		/* wait for events */
1069 		retval = wait_event_interruptible (dev->wait,
1070 				dev->ev_next != 0);
1071 		if (retval < 0)
1072 			return retval;
1073 		spin_lock_irq (&dev->lock);
1074 		goto scan;
1075 	}
1076 
1077 done:
1078 	spin_unlock_irq (&dev->lock);
1079 	return retval;
1080 }
1081 
1082 static struct usb_gadgetfs_event *
1083 next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1084 {
1085 	struct usb_gadgetfs_event	*event;
1086 	unsigned			i;
1087 
1088 	switch (type) {
1089 	/* these events purge the queue */
1090 	case GADGETFS_DISCONNECT:
1091 		if (dev->state == STATE_DEV_SETUP)
1092 			dev->setup_abort = 1;
1093 		fallthrough;
1094 	case GADGETFS_CONNECT:
1095 		dev->ev_next = 0;
1096 		break;
1097 	case GADGETFS_SETUP:		/* previous request timed out */
1098 	case GADGETFS_SUSPEND:		/* same effect */
1099 		/* these events can't be repeated */
1100 		for (i = 0; i != dev->ev_next; i++) {
1101 			if (dev->event [i].type != type)
1102 				continue;
1103 			DBG(dev, "discard old event[%d] %d\n", i, type);
1104 			dev->ev_next--;
1105 			if (i == dev->ev_next)
1106 				break;
1107 			/* indices start at zero, for simplicity */
1108 			memmove (&dev->event [i], &dev->event [i + 1],
1109 				sizeof (struct usb_gadgetfs_event)
1110 					* (dev->ev_next - i));
1111 		}
1112 		break;
1113 	default:
1114 		BUG ();
1115 	}
1116 	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1117 	event = &dev->event [dev->ev_next++];
1118 	BUG_ON (dev->ev_next > N_EVENT);
1119 	memset (event, 0, sizeof *event);
1120 	event->type = type;
1121 	return event;
1122 }
1123 
1124 static ssize_t
1125 ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1126 {
1127 	struct dev_data		*dev = fd->private_data;
1128 	ssize_t			retval = -ESRCH;
1129 
1130 	/* report fd mode change before acting on it */
1131 	if (dev->setup_abort) {
1132 		dev->setup_abort = 0;
1133 		retval = -EIDRM;
1134 
1135 	/* data and/or status stage for control request */
1136 	} else if (dev->state == STATE_DEV_SETUP) {
1137 
1138 		len = min_t(size_t, len, dev->setup_wLength);
1139 		if (dev->setup_in) {
1140 			retval = setup_req (dev->gadget->ep0, dev->req, len);
1141 			if (retval == 0) {
1142 				dev->state = STATE_DEV_CONNECTED;
1143 				++dev->udc_usage;
1144 				spin_unlock_irq (&dev->lock);
1145 				if (copy_from_user (dev->req->buf, buf, len))
1146 					retval = -EFAULT;
1147 				else {
1148 					if (len < dev->setup_wLength)
1149 						dev->req->zero = 1;
1150 					retval = usb_ep_queue (
1151 						dev->gadget->ep0, dev->req,
1152 						GFP_KERNEL);
1153 				}
1154 				spin_lock_irq(&dev->lock);
1155 				--dev->udc_usage;
1156 				if (retval < 0) {
1157 					clean_req (dev->gadget->ep0, dev->req);
1158 				} else
1159 					retval = len;
1160 
1161 				return retval;
1162 			}
1163 
1164 		/* can stall some OUT transfers */
1165 		} else if (dev->setup_can_stall) {
1166 			VDEBUG(dev, "ep0out stall\n");
1167 			(void) usb_ep_set_halt (dev->gadget->ep0);
1168 			retval = -EL2HLT;
1169 			dev->state = STATE_DEV_CONNECTED;
1170 		} else {
1171 			DBG(dev, "bogus ep0out stall!\n");
1172 		}
1173 	} else
1174 		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1175 
1176 	return retval;
1177 }
1178 
1179 static int
1180 ep0_fasync (int f, struct file *fd, int on)
1181 {
1182 	struct dev_data		*dev = fd->private_data;
1183 	// caller must F_SETOWN before signal delivery happens
1184 	VDEBUG(dev, "%s %s\n", __func__, str_on_off(on));
1185 	return fasync_helper (f, fd, on, &dev->fasync);
1186 }
1187 
1188 static struct usb_gadget_driver gadgetfs_driver;
1189 
1190 static int
1191 dev_release (struct inode *inode, struct file *fd)
1192 {
1193 	struct dev_data		*dev = fd->private_data;
1194 
1195 	/* closing ep0 === shutdown all */
1196 
1197 	if (dev->gadget_registered) {
1198 		usb_gadget_unregister_driver (&gadgetfs_driver);
1199 		dev->gadget_registered = false;
1200 	}
1201 
1202 	/* at this point "good" hardware has disconnected the
1203 	 * device from USB; the host won't see it any more.
1204 	 * alternatively, all host requests will time out.
1205 	 */
1206 
1207 	kfree (dev->buf);
1208 	dev->buf = NULL;
1209 
1210 	/* other endpoints were all decoupled from this device */
1211 	spin_lock_irq(&dev->lock);
1212 	dev->state = STATE_DEV_DISABLED;
1213 	spin_unlock_irq(&dev->lock);
1214 
1215 	put_dev (dev);
1216 	return 0;
1217 }
1218 
1219 static __poll_t
1220 ep0_poll (struct file *fd, poll_table *wait)
1221 {
1222 	struct dev_data         *dev = fd->private_data;
1223 	__poll_t                mask = 0;
1224 
1225 	if (dev->state <= STATE_DEV_OPENED)
1226 		return DEFAULT_POLLMASK;
1227 
1228 	poll_wait(fd, &dev->wait, wait);
1229 
1230 	spin_lock_irq(&dev->lock);
1231 
1232 	/* report fd mode change before acting on it */
1233 	if (dev->setup_abort) {
1234 		dev->setup_abort = 0;
1235 		mask = EPOLLHUP;
1236 		goto out;
1237 	}
1238 
1239 	if (dev->state == STATE_DEV_SETUP) {
1240 		if (dev->setup_in || dev->setup_can_stall)
1241 			mask = EPOLLOUT;
1242 	} else {
1243 		if (dev->ev_next != 0)
1244 			mask = EPOLLIN;
1245 	}
1246 out:
1247 	spin_unlock_irq(&dev->lock);
1248 	return mask;
1249 }
1250 
1251 static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1252 {
1253 	struct dev_data		*dev = fd->private_data;
1254 	struct usb_gadget	*gadget = dev->gadget;
1255 	long ret = -ENOTTY;
1256 
1257 	spin_lock_irq(&dev->lock);
1258 	if (dev->state == STATE_DEV_OPENED ||
1259 			dev->state == STATE_DEV_UNBOUND) {
1260 		/* Not bound to a UDC */
1261 	} else if (gadget->ops->ioctl) {
1262 		++dev->udc_usage;
1263 		spin_unlock_irq(&dev->lock);
1264 
1265 		ret = gadget->ops->ioctl (gadget, code, value);
1266 
1267 		spin_lock_irq(&dev->lock);
1268 		--dev->udc_usage;
1269 	}
1270 	spin_unlock_irq(&dev->lock);
1271 
1272 	return ret;
1273 }
1274 
1275 /*----------------------------------------------------------------------*/
1276 
1277 /* The in-kernel gadget driver handles most ep0 issues, in particular
1278  * enumerating the single configuration (as provided from user space).
1279  *
1280  * Unrecognized ep0 requests may be handled in user space.
1281  */
1282 
1283 static void make_qualifier (struct dev_data *dev)
1284 {
1285 	struct usb_qualifier_descriptor		qual;
1286 	struct usb_device_descriptor		*desc;
1287 
1288 	qual.bLength = sizeof qual;
1289 	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1290 	qual.bcdUSB = cpu_to_le16 (0x0200);
1291 
1292 	desc = dev->dev;
1293 	qual.bDeviceClass = desc->bDeviceClass;
1294 	qual.bDeviceSubClass = desc->bDeviceSubClass;
1295 	qual.bDeviceProtocol = desc->bDeviceProtocol;
1296 
1297 	/* assumes ep0 uses the same value for both speeds ... */
1298 	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1299 
1300 	qual.bNumConfigurations = 1;
1301 	qual.bRESERVED = 0;
1302 
1303 	memcpy (dev->rbuf, &qual, sizeof qual);
1304 }
1305 
1306 static int
1307 config_buf (struct dev_data *dev, u8 type, unsigned index)
1308 {
1309 	int		len;
1310 	int		hs = 0;
1311 
1312 	/* only one configuration */
1313 	if (index > 0)
1314 		return -EINVAL;
1315 
1316 	if (gadget_is_dualspeed(dev->gadget)) {
1317 		hs = (dev->gadget->speed == USB_SPEED_HIGH);
1318 		if (type == USB_DT_OTHER_SPEED_CONFIG)
1319 			hs = !hs;
1320 	}
1321 	if (hs) {
1322 		dev->req->buf = dev->hs_config;
1323 		len = le16_to_cpu(dev->hs_config->wTotalLength);
1324 	} else {
1325 		dev->req->buf = dev->config;
1326 		len = le16_to_cpu(dev->config->wTotalLength);
1327 	}
1328 	((u8 *)dev->req->buf) [1] = type;
1329 	return len;
1330 }
1331 
1332 static int
1333 gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1334 {
1335 	struct dev_data			*dev = get_gadget_data (gadget);
1336 	struct usb_request		*req = dev->req;
1337 	int				value = -EOPNOTSUPP;
1338 	struct usb_gadgetfs_event	*event;
1339 	u16				w_value = le16_to_cpu(ctrl->wValue);
1340 	u16				w_length = le16_to_cpu(ctrl->wLength);
1341 
1342 	if (w_length > RBUF_SIZE) {
1343 		if (ctrl->bRequestType & USB_DIR_IN) {
1344 			/* Cast away the const, we are going to overwrite on purpose. */
1345 			__le16 *temp = (__le16 *)&ctrl->wLength;
1346 
1347 			*temp = cpu_to_le16(RBUF_SIZE);
1348 			w_length = RBUF_SIZE;
1349 		} else {
1350 			return value;
1351 		}
1352 	}
1353 
1354 	spin_lock (&dev->lock);
1355 	dev->setup_abort = 0;
1356 	if (dev->state == STATE_DEV_UNCONNECTED) {
1357 		if (gadget_is_dualspeed(gadget)
1358 				&& gadget->speed == USB_SPEED_HIGH
1359 				&& dev->hs_config == NULL) {
1360 			spin_unlock(&dev->lock);
1361 			ERROR (dev, "no high speed config??\n");
1362 			return -EINVAL;
1363 		}
1364 
1365 		dev->state = STATE_DEV_CONNECTED;
1366 
1367 		INFO (dev, "connected\n");
1368 		event = next_event (dev, GADGETFS_CONNECT);
1369 		event->u.speed = gadget->speed;
1370 		ep0_readable (dev);
1371 
1372 	/* host may have given up waiting for response.  we can miss control
1373 	 * requests handled lower down (device/endpoint status and features);
1374 	 * then ep0_{read,write} will report the wrong status. controller
1375 	 * driver will have aborted pending i/o.
1376 	 */
1377 	} else if (dev->state == STATE_DEV_SETUP)
1378 		dev->setup_abort = 1;
1379 
1380 	req->buf = dev->rbuf;
1381 	req->context = NULL;
1382 	switch (ctrl->bRequest) {
1383 
1384 	case USB_REQ_GET_DESCRIPTOR:
1385 		if (ctrl->bRequestType != USB_DIR_IN)
1386 			goto unrecognized;
1387 		switch (w_value >> 8) {
1388 
1389 		case USB_DT_DEVICE:
1390 			value = min (w_length, (u16) sizeof *dev->dev);
1391 			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1392 			req->buf = dev->dev;
1393 			break;
1394 		case USB_DT_DEVICE_QUALIFIER:
1395 			if (!dev->hs_config)
1396 				break;
1397 			value = min (w_length, (u16)
1398 				sizeof (struct usb_qualifier_descriptor));
1399 			make_qualifier (dev);
1400 			break;
1401 		case USB_DT_OTHER_SPEED_CONFIG:
1402 		case USB_DT_CONFIG:
1403 			value = config_buf (dev,
1404 					w_value >> 8,
1405 					w_value & 0xff);
1406 			if (value >= 0)
1407 				value = min (w_length, (u16) value);
1408 			break;
1409 		case USB_DT_STRING:
1410 			goto unrecognized;
1411 
1412 		default:		// all others are errors
1413 			break;
1414 		}
1415 		break;
1416 
1417 	/* currently one config, two speeds */
1418 	case USB_REQ_SET_CONFIGURATION:
1419 		if (ctrl->bRequestType != 0)
1420 			goto unrecognized;
1421 		if (0 == (u8) w_value) {
1422 			value = 0;
1423 			dev->current_config = 0;
1424 			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1425 			// user mode expected to disable endpoints
1426 		} else {
1427 			u8	config, power;
1428 
1429 			if (gadget_is_dualspeed(gadget)
1430 					&& gadget->speed == USB_SPEED_HIGH) {
1431 				config = dev->hs_config->bConfigurationValue;
1432 				power = dev->hs_config->bMaxPower;
1433 			} else {
1434 				config = dev->config->bConfigurationValue;
1435 				power = dev->config->bMaxPower;
1436 			}
1437 
1438 			if (config == (u8) w_value) {
1439 				value = 0;
1440 				dev->current_config = config;
1441 				usb_gadget_vbus_draw(gadget, 2 * power);
1442 			}
1443 		}
1444 
1445 		/* report SET_CONFIGURATION like any other control request,
1446 		 * except that usermode may not stall this.  the next
1447 		 * request mustn't be allowed to start until this finishes:
1448 		 * endpoints and threads set up, etc.
1449 		 *
1450 		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
1451 		 * has bad/racy automagic that prevents synchronizing here.
1452 		 * even kernel mode drivers often miss them.
1453 		 */
1454 		if (value == 0) {
1455 			INFO (dev, "configuration #%d\n", dev->current_config);
1456 			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1457 			if (dev->usermode_setup) {
1458 				dev->setup_can_stall = 0;
1459 				goto delegate;
1460 			}
1461 		}
1462 		break;
1463 
1464 #ifndef	CONFIG_USB_PXA25X
1465 	/* PXA automagically handles this request too */
1466 	case USB_REQ_GET_CONFIGURATION:
1467 		if (ctrl->bRequestType != 0x80)
1468 			goto unrecognized;
1469 		*(u8 *)req->buf = dev->current_config;
1470 		value = min (w_length, (u16) 1);
1471 		break;
1472 #endif
1473 
1474 	default:
1475 unrecognized:
1476 		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1477 			dev->usermode_setup ? "delegate" : "fail",
1478 			ctrl->bRequestType, ctrl->bRequest,
1479 			w_value, le16_to_cpu(ctrl->wIndex), w_length);
1480 
1481 		/* if there's an ep0 reader, don't stall */
1482 		if (dev->usermode_setup) {
1483 			dev->setup_can_stall = 1;
1484 delegate:
1485 			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1486 						? 1 : 0;
1487 			dev->setup_wLength = w_length;
1488 			dev->setup_out_ready = 0;
1489 			dev->setup_out_error = 0;
1490 
1491 			/* read DATA stage for OUT right away */
1492 			if (unlikely (!dev->setup_in && w_length)) {
1493 				value = setup_req (gadget->ep0, dev->req,
1494 							w_length);
1495 				if (value < 0)
1496 					break;
1497 
1498 				++dev->udc_usage;
1499 				spin_unlock (&dev->lock);
1500 				value = usb_ep_queue (gadget->ep0, dev->req,
1501 							GFP_KERNEL);
1502 				spin_lock (&dev->lock);
1503 				--dev->udc_usage;
1504 				if (value < 0) {
1505 					clean_req (gadget->ep0, dev->req);
1506 					break;
1507 				}
1508 
1509 				/* we can't currently stall these */
1510 				dev->setup_can_stall = 0;
1511 			}
1512 
1513 			/* state changes when reader collects event */
1514 			event = next_event (dev, GADGETFS_SETUP);
1515 			event->u.setup = *ctrl;
1516 			ep0_readable (dev);
1517 			spin_unlock (&dev->lock);
1518 			/*
1519 			 * Return USB_GADGET_DELAYED_STATUS as a workaround to
1520 			 * stop some UDC drivers (e.g. dwc3) from automatically
1521 			 * proceeding with the status stage for 0-length
1522 			 * transfers.
1523 			 * Should be removed once all UDC drivers are fixed to
1524 			 * always delay the status stage until a response is
1525 			 * queued to EP0.
1526 			 */
1527 			return w_length == 0 ? USB_GADGET_DELAYED_STATUS : 0;
1528 		}
1529 	}
1530 
1531 	/* proceed with data transfer and status phases? */
1532 	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1533 		req->length = value;
1534 		req->zero = value < w_length;
1535 
1536 		++dev->udc_usage;
1537 		spin_unlock (&dev->lock);
1538 		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1539 		spin_lock(&dev->lock);
1540 		--dev->udc_usage;
1541 		spin_unlock(&dev->lock);
1542 		if (value < 0) {
1543 			DBG (dev, "ep_queue --> %d\n", value);
1544 			req->status = 0;
1545 		}
1546 		return value;
1547 	}
1548 
1549 	/* device stalls when value < 0 */
1550 	spin_unlock (&dev->lock);
1551 	return value;
1552 }
1553 
1554 static void destroy_ep_files (struct dev_data *dev)
1555 {
1556 	DBG (dev, "%s %d\n", __func__, dev->state);
1557 
1558 	/* dev->state must prevent interference */
1559 	spin_lock_irq (&dev->lock);
1560 	while (!list_empty(&dev->epfiles)) {
1561 		struct ep_data	*ep;
1562 
1563 		/* break link to FS */
1564 		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1565 		list_del_init (&ep->epfiles);
1566 		spin_unlock_irq (&dev->lock);
1567 
1568 		/* break link to controller */
1569 		mutex_lock(&ep->lock);
1570 		if (ep->state == STATE_EP_ENABLED)
1571 			(void) usb_ep_disable (ep->ep);
1572 		ep->state = STATE_EP_UNBOUND;
1573 		usb_ep_free_request (ep->ep, ep->req);
1574 		ep->ep = NULL;
1575 		mutex_unlock(&ep->lock);
1576 
1577 		wake_up (&ep->wait);
1578 
1579 		/* break link to dcache */
1580 		simple_remove_by_name(dev->sb->s_root, ep->name, NULL);
1581 
1582 		put_ep (ep);
1583 
1584 		spin_lock_irq (&dev->lock);
1585 	}
1586 	spin_unlock_irq (&dev->lock);
1587 }
1588 
1589 
1590 static int gadgetfs_create_file (struct super_block *sb, char const *name,
1591 		void *data, const struct file_operations *fops);
1592 
1593 static int activate_ep_files (struct dev_data *dev)
1594 {
1595 	struct usb_ep	*ep;
1596 	struct ep_data	*data;
1597 	int err;
1598 
1599 	gadget_for_each_ep (ep, dev->gadget) {
1600 
1601 		data = kzalloc(sizeof(*data), GFP_KERNEL);
1602 		if (!data)
1603 			goto enomem0;
1604 		data->state = STATE_EP_DISABLED;
1605 		mutex_init(&data->lock);
1606 		init_waitqueue_head (&data->wait);
1607 
1608 		strscpy(data->name, ep->name);
1609 		refcount_set (&data->count, 1);
1610 		data->dev = dev;
1611 		get_dev (dev);
1612 
1613 		data->ep = ep;
1614 		ep->driver_data = data;
1615 
1616 		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1617 		if (!data->req)
1618 			goto enomem1;
1619 
1620 		err = gadgetfs_create_file (dev->sb, data->name,
1621 				data, &ep_io_operations);
1622 		if (err)
1623 			goto enomem2;
1624 		list_add_tail (&data->epfiles, &dev->epfiles);
1625 	}
1626 	return 0;
1627 
1628 enomem2:
1629 	usb_ep_free_request (ep, data->req);
1630 enomem1:
1631 	put_dev (dev);
1632 	kfree (data);
1633 enomem0:
1634 	DBG (dev, "%s enomem\n", __func__);
1635 	destroy_ep_files (dev);
1636 	return -ENOMEM;
1637 }
1638 
1639 static void
1640 gadgetfs_unbind (struct usb_gadget *gadget)
1641 {
1642 	struct dev_data		*dev = get_gadget_data (gadget);
1643 
1644 	DBG (dev, "%s\n", __func__);
1645 
1646 	spin_lock_irq (&dev->lock);
1647 	dev->state = STATE_DEV_UNBOUND;
1648 	while (dev->udc_usage > 0) {
1649 		spin_unlock_irq(&dev->lock);
1650 		usleep_range(1000, 2000);
1651 		spin_lock_irq(&dev->lock);
1652 	}
1653 	spin_unlock_irq (&dev->lock);
1654 
1655 	destroy_ep_files (dev);
1656 	gadget->ep0->driver_data = NULL;
1657 	set_gadget_data (gadget, NULL);
1658 
1659 	/* we've already been disconnected ... no i/o is active */
1660 	if (dev->req)
1661 		usb_ep_free_request (gadget->ep0, dev->req);
1662 	DBG (dev, "%s done\n", __func__);
1663 	put_dev (dev);
1664 }
1665 
1666 static struct dev_data		*the_device;
1667 
1668 static int gadgetfs_bind(struct usb_gadget *gadget,
1669 		struct usb_gadget_driver *driver)
1670 {
1671 	struct dev_data		*dev = the_device;
1672 
1673 	if (!dev)
1674 		return -ESRCH;
1675 	if (0 != strcmp (CHIP, gadget->name)) {
1676 		pr_err("%s expected %s controller not %s\n",
1677 			shortname, CHIP, gadget->name);
1678 		return -ENODEV;
1679 	}
1680 
1681 	set_gadget_data (gadget, dev);
1682 	dev->gadget = gadget;
1683 	gadget->ep0->driver_data = dev;
1684 
1685 	/* preallocate control response and buffer */
1686 	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1687 	if (!dev->req)
1688 		goto enomem;
1689 	dev->req->context = NULL;
1690 	dev->req->complete = epio_complete;
1691 
1692 	if (activate_ep_files (dev) < 0)
1693 		goto enomem;
1694 
1695 	INFO (dev, "bound to %s driver\n", gadget->name);
1696 	spin_lock_irq(&dev->lock);
1697 	dev->state = STATE_DEV_UNCONNECTED;
1698 	spin_unlock_irq(&dev->lock);
1699 	get_dev (dev);
1700 	return 0;
1701 
1702 enomem:
1703 	gadgetfs_unbind (gadget);
1704 	return -ENOMEM;
1705 }
1706 
1707 static void
1708 gadgetfs_disconnect (struct usb_gadget *gadget)
1709 {
1710 	struct dev_data		*dev = get_gadget_data (gadget);
1711 	unsigned long		flags;
1712 
1713 	spin_lock_irqsave (&dev->lock, flags);
1714 	if (dev->state == STATE_DEV_UNCONNECTED)
1715 		goto exit;
1716 	dev->state = STATE_DEV_UNCONNECTED;
1717 
1718 	INFO (dev, "disconnected\n");
1719 	next_event (dev, GADGETFS_DISCONNECT);
1720 	ep0_readable (dev);
1721 exit:
1722 	spin_unlock_irqrestore (&dev->lock, flags);
1723 }
1724 
1725 static void
1726 gadgetfs_suspend (struct usb_gadget *gadget)
1727 {
1728 	struct dev_data		*dev = get_gadget_data (gadget);
1729 	unsigned long		flags;
1730 
1731 	INFO (dev, "suspended from state %d\n", dev->state);
1732 	spin_lock_irqsave(&dev->lock, flags);
1733 	switch (dev->state) {
1734 	case STATE_DEV_SETUP:		// VERY odd... host died??
1735 	case STATE_DEV_CONNECTED:
1736 	case STATE_DEV_UNCONNECTED:
1737 		next_event (dev, GADGETFS_SUSPEND);
1738 		ep0_readable (dev);
1739 		fallthrough;
1740 	default:
1741 		break;
1742 	}
1743 	spin_unlock_irqrestore(&dev->lock, flags);
1744 }
1745 
1746 static struct usb_gadget_driver gadgetfs_driver = {
1747 	.function	= (char *) driver_desc,
1748 	.bind		= gadgetfs_bind,
1749 	.unbind		= gadgetfs_unbind,
1750 	.setup		= gadgetfs_setup,
1751 	.reset		= gadgetfs_disconnect,
1752 	.disconnect	= gadgetfs_disconnect,
1753 	.suspend	= gadgetfs_suspend,
1754 
1755 	.driver	= {
1756 		.name		= shortname,
1757 	},
1758 };
1759 
1760 /*----------------------------------------------------------------------*/
1761 /* DEVICE INITIALIZATION
1762  *
1763  *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
1764  *     status = write (fd, descriptors, sizeof descriptors)
1765  *
1766  * That write establishes the device configuration, so the kernel can
1767  * bind to the controller ... guaranteeing it can handle enumeration
1768  * at all necessary speeds.  Descriptor order is:
1769  *
1770  * . message tag (u32, host order) ... for now, must be zero; it
1771  *	would change to support features like multi-config devices
1772  * . full/low speed config ... all wTotalLength bytes (with interface,
1773  *	class, altsetting, endpoint, and other descriptors)
1774  * . high speed config ... all descriptors, for high speed operation;
1775  *	this one's optional except for high-speed hardware
1776  * . device descriptor
1777  *
1778  * Endpoints are not yet enabled. Drivers must wait until device
1779  * configuration and interface altsetting changes create
1780  * the need to configure (or unconfigure) them.
1781  *
1782  * After initialization, the device stays active for as long as that
1783  * $CHIP file is open.  Events must then be read from that descriptor,
1784  * such as configuration notifications.
1785  */
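/*
 * A hedged sketch of composing that write in user space ("config",
 * "hs_config" and "device" stand for fully populated descriptor blobs;
 * sizes are whatever their wTotalLength fields say):
 *
 *	__u32 tag = 0;
 *	char *p = msg;
 *
 *	memcpy(p, &tag, 4);			p += 4;
 *	memcpy(p, config, config_total);	p += config_total;
 *	memcpy(p, hs_config, hs_total);		p += hs_total;	// optional
 *	memcpy(p, &device, USB_DT_DEVICE_SIZE);	p += USB_DT_DEVICE_SIZE;
 *	write(chip_fd, msg, p - msg);		// triggers gadgetfs_bind()
 */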
1786 
1787 static int is_valid_config(struct usb_config_descriptor *config,
1788 		unsigned int total)
1789 {
1790 	return config->bDescriptorType == USB_DT_CONFIG
1791 		&& config->bLength == USB_DT_CONFIG_SIZE
1792 		&& total >= USB_DT_CONFIG_SIZE
1793 		&& config->bConfigurationValue != 0
1794 		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1795 		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1796 	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1797 	/* FIXME check lengths: walk to end */
1798 }
1799 
1800 static ssize_t
1801 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1802 {
1803 	struct dev_data		*dev = fd->private_data;
1804 	ssize_t			value, length = len;
1805 	unsigned		total;
1806 	u32			tag;
1807 	char			*kbuf;
1808 
1809 	spin_lock_irq(&dev->lock);
1810 	if (dev->state > STATE_DEV_OPENED) {
1811 		value = ep0_write(fd, buf, len, ptr);
1812 		spin_unlock_irq(&dev->lock);
1813 		return value;
1814 	}
1815 	spin_unlock_irq(&dev->lock);
1816 
1817 	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1818 	    (len > PAGE_SIZE * 4))
1819 		return -EINVAL;
1820 
1821 	/* we might need to change message format someday */
1822 	if (copy_from_user (&tag, buf, 4))
1823 		return -EFAULT;
1824 	if (tag != 0)
1825 		return -EINVAL;
1826 	buf += 4;
1827 	length -= 4;
1828 
1829 	kbuf = memdup_user(buf, length);
1830 	if (IS_ERR(kbuf))
1831 		return PTR_ERR(kbuf);
1832 
1833 	spin_lock_irq (&dev->lock);
1834 	value = -EINVAL;
1835 	if (dev->buf) {
1836 		spin_unlock_irq(&dev->lock);
1837 		kfree(kbuf);
1838 		return value;
1839 	}
1840 	dev->buf = kbuf;
1841 
1842 	/* full or low speed config */
1843 	dev->config = (void *) kbuf;
1844 	total = le16_to_cpu(dev->config->wTotalLength);
1845 	if (!is_valid_config(dev->config, total) ||
1846 			total > length - USB_DT_DEVICE_SIZE)
1847 		goto fail;
1848 	kbuf += total;
1849 	length -= total;
1850 
1851 	/* optional high speed config */
1852 	if (kbuf [1] == USB_DT_CONFIG) {
1853 		dev->hs_config = (void *) kbuf;
1854 		total = le16_to_cpu(dev->hs_config->wTotalLength);
1855 		if (!is_valid_config(dev->hs_config, total) ||
1856 				total > length - USB_DT_DEVICE_SIZE)
1857 			goto fail;
1858 		kbuf += total;
1859 		length -= total;
1860 	} else {
1861 		dev->hs_config = NULL;
1862 	}
1863 
1864 	/* could support multiple configs, using another encoding! */
1865 
1866 	/* device descriptor (tweaked for paranoia) */
1867 	if (length != USB_DT_DEVICE_SIZE)
1868 		goto fail;
1869 	dev->dev = (void *)kbuf;
1870 	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1871 			|| dev->dev->bDescriptorType != USB_DT_DEVICE
1872 			|| dev->dev->bNumConfigurations != 1)
1873 		goto fail;
1874 	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1875 
1876 	/* triggers gadgetfs_bind(); then we can enumerate. */
1877 	spin_unlock_irq (&dev->lock);
1878 	if (dev->hs_config)
1879 		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1880 	else
1881 		gadgetfs_driver.max_speed = USB_SPEED_FULL;
1882 
1883 	value = usb_gadget_register_driver(&gadgetfs_driver);
1884 	if (value != 0) {
1885 		spin_lock_irq(&dev->lock);
1886 		goto fail;
1887 	} else {
1888 		/* at this point "good" hardware has for the first time
1889 		 * let the USB the host see us.  alternatively, if users
1890 		 * let the USB host see us.  alternatively, if users
1891 		 * unplug/replug, that will clear all the error state.
1892 		 * note:  everything running before here was guaranteed
1893 		 * to choke driver model style diagnostics.  from here
1894 		 * on, they can work ... except in cleanup paths that
1895 		 * kick in after the ep0 descriptor is closed.
1896 		 */
1897 		value = len;
1898 		dev->gadget_registered = true;
1899 	}
1900 	return value;
1901 
1902 fail:
1903 	dev->config = NULL;
1904 	dev->hs_config = NULL;
1905 	dev->dev = NULL;
1906 	spin_unlock_irq (&dev->lock);
1907 	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1908 	kfree (dev->buf);
1909 	dev->buf = NULL;
1910 	return value;
1911 }
1912 
1913 static int
1914 gadget_dev_open (struct inode *inode, struct file *fd)
1915 {
1916 	struct dev_data		*dev = inode->i_private;
1917 	int			value = -EBUSY;
1918 
1919 	spin_lock_irq(&dev->lock);
1920 	if (dev->state == STATE_DEV_DISABLED) {
1921 		dev->ev_next = 0;
1922 		dev->state = STATE_DEV_OPENED;
1923 		fd->private_data = dev;
1924 		get_dev (dev);
1925 		value = 0;
1926 	}
1927 	spin_unlock_irq(&dev->lock);
1928 	return value;
1929 }
1930 
1931 static const struct file_operations ep0_operations = {
1932 
1933 	.open =		gadget_dev_open,
1934 	.read =		ep0_read,
1935 	.write =	dev_config,
1936 	.fasync =	ep0_fasync,
1937 	.poll =		ep0_poll,
1938 	.unlocked_ioctl = gadget_dev_ioctl,
1939 	.release =	dev_release,
1940 };
1941 
1942 /*----------------------------------------------------------------------*/
1943 
1944 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1945  *
1946  * Mounting the filesystem creates a controller file, used first for
1947  * device configuration then later for event monitoring.
1948  */
1949 
1950 
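/* A minimal userspace sketch of reaching that controller file; the
 * /dev/gadget mount point is conventional rather than required, and
 * error handling is omitted:
 *
 *	#include <sys/mount.h>
 *
 *	if (mount("none", "/dev/gadget", "gadgetfs", 0, NULL) < 0)
 *		perror("mount gadgetfs");
 *
 * The single file created there is named after the UDC ($CHIP).
 */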
1951 /* FIXME PAM etc could set this security policy without mount options
1952  * if epfiles inherited ownership and permissons from ep0 ...
1953  */
1954 
1955 static unsigned default_uid;
1956 static unsigned default_gid;
1957 static unsigned default_perm = S_IRUSR | S_IWUSR;
1958 
1959 module_param (default_uid, uint, 0644);
1960 module_param (default_gid, uint, 0644);
1961 module_param (default_perm, uint, 0644);
1962 
1963 
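/* These defaults apply to files created at mount/configuration time and
 * can be overridden when loading the module, e.g. (illustrative values):
 *
 *	modprobe gadgetfs default_uid=1000 default_gid=1000 default_perm=0660
 *
 * Because the parameters are registered with mode 0644 they also appear
 * under /sys/module/gadgetfs/parameters/, though changes made there only
 * affect files created afterwards.
 */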
1964 static struct inode *
1965 gadgetfs_make_inode (struct super_block *sb,
1966 		void *data, const struct file_operations *fops,
1967 		int mode)
1968 {
1969 	struct inode *inode = new_inode (sb);
1970 
1971 	if (inode) {
1972 		inode->i_ino = get_next_ino();
1973 		inode->i_mode = mode;
1974 		inode->i_uid = make_kuid(&init_user_ns, default_uid);
1975 		inode->i_gid = make_kgid(&init_user_ns, default_gid);
1976 		simple_inode_init_ts(inode);
1977 		inode->i_private = data;
1978 		inode->i_fop = fops;
1979 	}
1980 	return inode;
1981 }
1982 
1983 /* creates in the fs root directory, so files are non-renamable and
1984  * non-linkable; the inode and dentry stay paired until device reconfig.
1985  */
1986 static int gadgetfs_create_file (struct super_block *sb, char const *name,
1987 		void *data, const struct file_operations *fops)
1988 {
1989 	struct dentry	*dentry;
1990 	struct inode	*inode;
1991 
1992 	inode = gadgetfs_make_inode (sb, data, fops,
1993 			S_IFREG | (default_perm & S_IRWXUGO));
1994 	if (!inode)
1995 		return -ENOMEM;
1996 
1997 	dentry = simple_start_creating(sb->s_root, name);
1998 	if (IS_ERR(dentry)) {
1999 		iput(inode);
2000 		return PTR_ERR(dentry);
2001 	}
2002 
2003 	d_make_persistent(dentry, inode);
2004 
2005 	simple_done_creating(dentry);
2006 	return 0;
2007 }
2008 
2009 static const struct super_operations gadget_fs_operations = {
2010 	.statfs =	simple_statfs,
2011 	.drop_inode =	inode_just_drop,
2012 };
2013 
2014 static int
2015 gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
2016 {
2017 	struct inode	*inode;
2018 	struct dev_data	*dev;
2019 	int		rc;
2020 
2021 	mutex_lock(&sb_mutex);
2022 
2023 	if (the_device) {
2024 		rc = -ESRCH;
2025 		goto Done;
2026 	}
2027 
2028 	CHIP = usb_get_gadget_udc_name();
2029 	if (!CHIP) {
2030 		rc = -ENODEV;
2031 		goto Done;
2032 	}
2033 
2034 	/* superblock */
2035 	sb->s_blocksize = PAGE_SIZE;
2036 	sb->s_blocksize_bits = PAGE_SHIFT;
2037 	sb->s_magic = GADGETFS_MAGIC;
2038 	sb->s_op = &gadget_fs_operations;
2039 	sb->s_time_gran = 1;
2040 
2041 	/* root inode */
2042 	inode = gadgetfs_make_inode (sb,
2043 			NULL, &simple_dir_operations,
2044 			S_IFDIR | S_IRUGO | S_IXUGO);
2045 	if (!inode)
2046 		goto Enomem;
2047 	inode->i_op = &simple_dir_inode_operations;
2048 	if (!(sb->s_root = d_make_root (inode)))
2049 		goto Enomem;
2050 
2051 	/* the ep0 file is named after the controller we expect;
2052 	 * user mode code can use it for sanity checks, like we do.
2053 	 */
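	/* Userspace sketch of such a check, where EXPECTED_UDC stands for
	 * whatever controller name the user mode driver was written for
	 * (illustrative only):
	 *
	 *	if (access("/dev/gadget/" EXPECTED_UDC, F_OK) < 0)
	 *		fprintf(stderr, "controller %s missing\n", EXPECTED_UDC);
	 */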
2054 	dev = dev_new ();
2055 	if (!dev)
2056 		goto Enomem;
2057 
2058 	dev->sb = sb;
2059 	rc = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2060 	if (rc) {
2061 		put_dev(dev);
2062 		goto Enomem;
2063 	}
2064 
2065 	/* other endpoint files are available after hardware setup,
2066 	 * from binding to a controller.
2067 	 */
2068 	the_device = dev;
2069 	rc = 0;
2070 	goto Done;
2071 
2072  Enomem:
2073 	kfree(CHIP);
2074 	CHIP = NULL;
2075 	rc = -ENOMEM;
2076 
2077  Done:
2078 	mutex_unlock(&sb_mutex);
2079 	return rc;
2080 }
2081 
2082 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2083 static int gadgetfs_get_tree(struct fs_context *fc)
2084 {
2085 	return get_tree_single(fc, gadgetfs_fill_super);
2086 }
2087 
2088 static const struct fs_context_operations gadgetfs_context_ops = {
2089 	.get_tree	= gadgetfs_get_tree,
2090 };
2091 
2092 static int gadgetfs_init_fs_context(struct fs_context *fc)
2093 {
2094 	fc->ops = &gadgetfs_context_ops;
2095 	return 0;
2096 }
2097 
2098 static void
2099 gadgetfs_kill_sb (struct super_block *sb)
2100 {
2101 	mutex_lock(&sb_mutex);
2102 	kill_anon_super (sb);
2103 	if (the_device) {
2104 		put_dev (the_device);
2105 		the_device = NULL;
2106 	}
2107 	kfree(CHIP);
2108 	CHIP = NULL;
2109 	mutex_unlock(&sb_mutex);
2110 }
2111 
2112 /*----------------------------------------------------------------------*/
2113 
2114 static struct file_system_type gadgetfs_type = {
2115 	.owner		= THIS_MODULE,
2116 	.name		= shortname,
2117 	.init_fs_context = gadgetfs_init_fs_context,
2118 	.kill_sb	= gadgetfs_kill_sb,
2119 };
2120 MODULE_ALIAS_FS("gadgetfs");
2121 
2122 /*----------------------------------------------------------------------*/
2123 
2124 static int __init gadgetfs_init (void)
2125 {
2126 	int status;
2127 
2128 	status = register_filesystem (&gadgetfs_type);
2129 	if (status == 0)
2130 		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2131 			shortname, driver_desc);
2132 	return status;
2133 }
2134 module_init (gadgetfs_init);
2135 
2136 static void __exit gadgetfs_cleanup (void)
2137 {
2138 	pr_debug ("unregister %s\n", shortname);
2139 	unregister_filesystem (&gadgetfs_type);
2140 }
2141 module_exit (gadgetfs_cleanup);
2142 
2143