xref: /linux/drivers/usb/gadget/udc/pxa27x_udc.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  * Handles the Intel 27x USB Device Controller (UDC)
3  *
4  * Inspired by original driver by Frank Becker, David Brownell, and others.
5  * Copyright (C) 2008 Robert Jarzmik
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/errno.h>
16 #include <linux/err.h>
17 #include <linux/platform_device.h>
18 #include <linux/delay.h>
19 #include <linux/list.h>
20 #include <linux/interrupt.h>
21 #include <linux/proc_fs.h>
22 #include <linux/clk.h>
23 #include <linux/irq.h>
24 #include <linux/gpio.h>
25 #include <linux/gpio/consumer.h>
26 #include <linux/slab.h>
27 #include <linux/prefetch.h>
28 #include <linux/byteorder/generic.h>
29 #include <linux/platform_data/pxa2xx_udc.h>
30 #include <linux/of_device.h>
31 #include <linux/of_gpio.h>
32 
33 #include <linux/usb.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 
37 #include "pxa27x_udc.h"
38 
39 /*
40  * This driver handles the USB Device Controller (UDC) in Intel's PXA 27x
41  * series processors.
42  *
43  * Such controller drivers work with a gadget driver.  The gadget driver
44  * returns descriptors, implements configuration and data protocols used
45  * by the host to interact with this device, and allocates endpoints to
46  * the different protocol interfaces.  The controller driver virtualizes
47  * usb hardware so that the gadget drivers will be more portable.
48  *
49  * This UDC hardware wants to implement a bit too much USB protocol. The
50  * biggest issues are:  that the endpoints have to be set up before the
51  * controller can be enabled (minor, and not uncommon); and each endpoint
52  * can only have one configuration, interface and alternative interface
53  * number (major, and very unusual). Once set up, these cannot be changed
54  * without a controller reset.
55  *
56  * The workaround is to set up, statically in the pxa_udc structure, all the
57  * combinations needed by the gadgets that will work with this driver; see
58  * pxa_udc, udc_usb_ep versus pxa_ep, find_pxa_ep, and the sketch below.
59  * (You could modify this if needed.  Some drivers have a "fifo_mode" module
60  * parameter to facilitate such changes.)
61  *
62  * The combinations have been tested with these gadgets:
63  *  - zero gadget
64  *  - file storage gadget
65  *  - ether gadget
66  *
67  * The driver doesn't use DMA, only IO access and IRQ callbacks. No use is
68  * made of UDC's double buffering either. USB "On-The-Go" is not implemented.
69  *
70  * All the requests are handled the same way:
71  *  - the driver first tries to serve the request directly through fifo IO
72  *  - if the IO fifo is not big enough, the remainder is sent/received from
73  *    the interrupt handler.
74  */
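/*
 * Illustrative sketch only, not part of the driver: one way to picture the
 * "static combinations" workaround described above.  The real data is the
 * statically initialized pxa_udc/pxa_ep table used by find_pxa_ep() further
 * down; the type and array names here (ep_combo_sketch, ep_combos_sketch)
 * are invented for the example.  The point is simply that the same physical
 * endpoint address may be listed once per (configuration, interface,
 * altsetting) tuple that a supported gadget needs.
 */
struct ep_combo_sketch {
	unsigned addr:4;	/* USB endpoint address (1..15) */
	unsigned dir_in:1;	/* 1 = IN, 0 = OUT */
	unsigned type:2;	/* USB_ENDPOINT_XFER_* */
	unsigned config:2;	/* configuration number */
	unsigned iface:3;	/* interface number */
	unsigned alt:3;		/* altsetting number */
};

static const struct ep_combo_sketch ep_combos_sketch[] __maybe_unused = {
	/* one physical BULK-IN fifo, reachable from two configurations */
	{ .addr = 1, .dir_in = 1, .type = USB_ENDPOINT_XFER_BULK,
	  .config = 1, .iface = 0, .alt = 0 },
	{ .addr = 1, .dir_in = 1, .type = USB_ENDPOINT_XFER_BULK,
	  .config = 2, .iface = 0, .alt = 0 },
};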
75 
76 #define	DRIVER_VERSION	"2008-04-18"
77 #define	DRIVER_DESC	"PXA 27x USB Device Controller driver"
78 
79 static const char driver_name[] = "pxa27x_udc";
80 static struct pxa_udc *the_controller;
81 
82 static void handle_ep(struct pxa_ep *ep);
83 
84 /*
85  * Debug filesystem
86  */
87 #ifdef CONFIG_USB_GADGET_DEBUG_FS
88 
89 #include <linux/debugfs.h>
90 #include <linux/uaccess.h>
91 #include <linux/seq_file.h>
92 
93 static int state_dbg_show(struct seq_file *s, void *p)
94 {
95 	struct pxa_udc *udc = s->private;
96 	u32 tmp;
97 
98 	if (!udc->driver)
99 		return -ENODEV;
100 
101 	/* basic device status */
102 	seq_printf(s, DRIVER_DESC "\n"
103 		   "%s version: %s\n"
104 		   "Gadget driver: %s\n",
105 		   driver_name, DRIVER_VERSION,
106 		   udc->driver ? udc->driver->driver.name : "(none)");
107 
108 	tmp = udc_readl(udc, UDCCR);
109 	seq_printf(s,
110 		   "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), con=%d,inter=%d,altinter=%d\n",
111 		   tmp,
112 		   (tmp & UDCCR_OEN) ? " oen":"",
113 		   (tmp & UDCCR_AALTHNP) ? " aalthnp":"",
114 		   (tmp & UDCCR_AHNP) ? " rem" : "",
115 		   (tmp & UDCCR_BHNP) ? " rstir" : "",
116 		   (tmp & UDCCR_DWRE) ? " dwre" : "",
117 		   (tmp & UDCCR_SMAC) ? " smac" : "",
118 		   (tmp & UDCCR_EMCE) ? " emce" : "",
119 		   (tmp & UDCCR_UDR) ? " udr" : "",
120 		   (tmp & UDCCR_UDA) ? " uda" : "",
121 		   (tmp & UDCCR_UDE) ? " ude" : "",
122 		   (tmp & UDCCR_ACN) >> UDCCR_ACN_S,
123 		   (tmp & UDCCR_AIN) >> UDCCR_AIN_S,
124 		   (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S);
125 	/* registers for device and ep0 */
126 	seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n",
127 		   udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1));
128 	seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n",
129 		   udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1));
130 	seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR));
131 	seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, reconfig=%lu\n",
132 		   udc->stats.irqs_reset, udc->stats.irqs_suspend,
133 		   udc->stats.irqs_resume, udc->stats.irqs_reconfig);
134 
135 	return 0;
136 }
137 
138 static int queues_dbg_show(struct seq_file *s, void *p)
139 {
140 	struct pxa_udc *udc = s->private;
141 	struct pxa_ep *ep;
142 	struct pxa27x_request *req;
143 	int i, maxpkt;
144 
145 	if (!udc->driver)
146 		return -ENODEV;
147 
148 	/* dump endpoint queues */
149 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
150 		ep = &udc->pxa_ep[i];
151 		maxpkt = ep->fifo_size;
152 		seq_printf(s,  "%-12s max_pkt=%d %s\n",
153 			   EPNAME(ep), maxpkt, "pio");
154 
155 		if (list_empty(&ep->queue)) {
156 			seq_puts(s, "\t(nothing queued)\n");
157 			continue;
158 		}
159 
160 		list_for_each_entry(req, &ep->queue, queue) {
161 			seq_printf(s,  "\treq %p len %d/%d buf %p\n",
162 				   &req->req, req->req.actual,
163 				   req->req.length, req->req.buf);
164 		}
165 	}
166 
167 	return 0;
168 }
169 
170 static int eps_dbg_show(struct seq_file *s, void *p)
171 {
172 	struct pxa_udc *udc = s->private;
173 	struct pxa_ep *ep;
174 	int i;
175 	u32 tmp;
176 
177 	if (!udc->driver)
178 		return -ENODEV;
179 
180 	ep = &udc->pxa_ep[0];
181 	tmp = udc_ep_readl(ep, UDCCSR);
182 	seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n",
183 		   tmp,
184 		   (tmp & UDCCSR0_SA) ? " sa" : "",
185 		   (tmp & UDCCSR0_RNE) ? " rne" : "",
186 		   (tmp & UDCCSR0_FST) ? " fst" : "",
187 		   (tmp & UDCCSR0_SST) ? " sst" : "",
188 		   (tmp & UDCCSR0_DME) ? " dme" : "",
189 		   (tmp & UDCCSR0_IPR) ? " ipr" : "",
190 		   (tmp & UDCCSR0_OPC) ? " opc" : "");
191 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
192 		ep = &udc->pxa_ep[i];
193 		tmp = i? udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR);
194 		seq_printf(s, "%-12s: IN %lu(%lu reqs), OUT %lu(%lu reqs), irqs=%lu, udccr=0x%08x, udccsr=0x%03x, udcbcr=%d\n",
195 			   EPNAME(ep),
196 			   ep->stats.in_bytes, ep->stats.in_ops,
197 			   ep->stats.out_bytes, ep->stats.out_ops,
198 			   ep->stats.irqs,
199 			   tmp, udc_ep_readl(ep, UDCCSR),
200 			   udc_ep_readl(ep, UDCBCR));
201 	}
202 
203 	return 0;
204 }
205 
206 static int eps_dbg_open(struct inode *inode, struct file *file)
207 {
208 	return single_open(file, eps_dbg_show, inode->i_private);
209 }
210 
211 static int queues_dbg_open(struct inode *inode, struct file *file)
212 {
213 	return single_open(file, queues_dbg_show, inode->i_private);
214 }
215 
216 static int state_dbg_open(struct inode *inode, struct file *file)
217 {
218 	return single_open(file, state_dbg_show, inode->i_private);
219 }
220 
221 static const struct file_operations state_dbg_fops = {
222 	.owner		= THIS_MODULE,
223 	.open		= state_dbg_open,
224 	.llseek		= seq_lseek,
225 	.read		= seq_read,
226 	.release	= single_release,
227 };
228 
229 static const struct file_operations queues_dbg_fops = {
230 	.owner		= THIS_MODULE,
231 	.open		= queues_dbg_open,
232 	.llseek		= seq_lseek,
233 	.read		= seq_read,
234 	.release	= single_release,
235 };
236 
237 static const struct file_operations eps_dbg_fops = {
238 	.owner		= THIS_MODULE,
239 	.open		= eps_dbg_open,
240 	.llseek		= seq_lseek,
241 	.read		= seq_read,
242 	.release	= single_release,
243 };
244 
245 static void pxa_init_debugfs(struct pxa_udc *udc)
246 {
247 	struct dentry *root, *state, *queues, *eps;
248 
249 	root = debugfs_create_dir(udc->gadget.name, NULL);
250 	if (IS_ERR(root) || !root)
251 		goto err_root;
252 
253 	state = debugfs_create_file("udcstate", 0400, root, udc,
254 			&state_dbg_fops);
255 	if (!state)
256 		goto err_state;
257 	queues = debugfs_create_file("queues", 0400, root, udc,
258 			&queues_dbg_fops);
259 	if (!queues)
260 		goto err_queues;
261 	eps = debugfs_create_file("epstate", 0400, root, udc,
262 			&eps_dbg_fops);
263 	if (!eps)
264 		goto err_eps;
265 
266 	udc->debugfs_root = root;
267 	udc->debugfs_state = state;
268 	udc->debugfs_queues = queues;
269 	udc->debugfs_eps = eps;
270 	return;
271 err_eps:
272 	debugfs_remove(eps);
273 err_queues:
274 	debugfs_remove(queues);
275 err_state:
276 	debugfs_remove(root);
277 err_root:
278 	dev_err(udc->dev, "debugfs is not available\n");
279 }
280 
281 static void pxa_cleanup_debugfs(struct pxa_udc *udc)
282 {
283 	debugfs_remove(udc->debugfs_eps);
284 	debugfs_remove(udc->debugfs_queues);
285 	debugfs_remove(udc->debugfs_state);
286 	debugfs_remove(udc->debugfs_root);
287 	udc->debugfs_eps = NULL;
288 	udc->debugfs_queues = NULL;
289 	udc->debugfs_state = NULL;
290 	udc->debugfs_root = NULL;
291 }
292 
293 #else
294 static inline void pxa_init_debugfs(struct pxa_udc *udc)
295 {
296 }
297 
298 static inline void pxa_cleanup_debugfs(struct pxa_udc *udc)
299 {
300 }
301 #endif
302 
303 /**
304  * is_match_usb_pxa - check if usb_ep and pxa_ep match
305  * @udc_usb_ep: usb endpoint
306  * @ep: pxa endpoint
307  * @config: configuration required in pxa_ep
308  * @interface: interface required in pxa_ep
309  * @altsetting: altsetting required in pxa_ep
310  *
311  * Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise
312  */
313 static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep,
314 		int config, int interface, int altsetting)
315 {
316 	if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr)
317 		return 0;
318 	if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in)
319 		return 0;
320 	if (usb_endpoint_type(&udc_usb_ep->desc) != ep->type)
321 		return 0;
322 	if ((ep->config != config) || (ep->interface != interface)
323 			|| (ep->alternate != altsetting))
324 		return 0;
325 	return 1;
326 }
327 
328 /**
329  * find_pxa_ep - find pxa_ep structure matching udc_usb_ep
330  * @udc: pxa udc
331  * @udc_usb_ep: udc_usb_ep structure
332  *
333  * Match udc_usb_ep against all available pxa_ep, to see if one of them matches.
334  * This is necessary because of the strong pxa hardware restriction requiring
335  * that once pxa endpoints are initialized, their configuration is frozen, and
336  * no change can be made to their address, direction, or to the configuration,
337  * interface or altsetting in which they are active ... which differs from more
338  * usual models, where endpoints are roughly just addressable fifos and
339  * configuration events are left up to gadget drivers (like all control messages).
340  *
341  * Note that there is still a blurred point here:
342  *   - we rely on the UDCCR register "active interface" and "active altsetting"
343  *     fields. This makes no sense with regard to the USB spec, where multiple
344  *     interfaces are active at the same time.
345  *   - if we knew for sure that the pxa could handle multiple interfaces at the
346  *     same time, assuming Intel's Developer Guide is wrong, this function
347  *     should be reviewed, and a cache of (iface, altsetting) pairs should
348  *     be kept in the pxa_udc structure. In this case this function would match
349  *     against that cache of pairs instead of the "last altsetting" set up.
350  *
351  * Returns the matched pxa_ep structure or NULL if none found
352  */
353 static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc,
354 		struct udc_usb_ep *udc_usb_ep)
355 {
356 	int i;
357 	struct pxa_ep *ep;
358 	int cfg = udc->config;
359 	int iface = udc->last_interface;
360 	int alt = udc->last_alternate;
361 
362 	if (udc_usb_ep == &udc->udc_usb_ep[0])
363 		return &udc->pxa_ep[0];
364 
365 	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
366 		ep = &udc->pxa_ep[i];
367 		if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt))
368 			return ep;
369 	}
370 	return NULL;
371 }
372 
373 /**
374  * update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep
375  * @udc: pxa udc
376  *
377  * Context: in_interrupt()
378  *
379  * Updates all pxa_ep fields in udc_usb_ep structures, if this field was
380  * previously set up (and is not NULL). The update is necessary is a
381  * configuration change or altsetting change was issued by the USB host.
382  */
383 static void update_pxa_ep_matches(struct pxa_udc *udc)
384 {
385 	int i;
386 	struct udc_usb_ep *udc_usb_ep;
387 
388 	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
389 		udc_usb_ep = &udc->udc_usb_ep[i];
390 		if (udc_usb_ep->pxa_ep)
391 			udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep);
392 	}
393 }
394 
395 /**
396  * pio_irq_enable - Enables irq generation for one endpoint
397  * @ep: udc endpoint
398  */
399 static void pio_irq_enable(struct pxa_ep *ep)
400 {
401 	struct pxa_udc *udc = ep->dev;
402 	int index = EPIDX(ep);
403 	u32 udcicr0 = udc_readl(udc, UDCICR0);
404 	u32 udcicr1 = udc_readl(udc, UDCICR1);
405 
406 	if (index < 16)
407 		udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2)));
408 	else
409 		udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2)));
410 }
411 
412 /**
413  * pio_irq_disable - Disables irq generation for one endpoint
414  * @ep: udc endpoint
415  */
416 static void pio_irq_disable(struct pxa_ep *ep)
417 {
418 	struct pxa_udc *udc = ep->dev;
419 	int index = EPIDX(ep);
420 	u32 udcicr0 = udc_readl(udc, UDCICR0);
421 	u32 udcicr1 = udc_readl(udc, UDCICR1);
422 
423 	if (index < 16)
424 		udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2)));
425 	else
426 		udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2)));
427 }
428 
429 /**
430  * udc_set_mask_UDCCR - set bits in UDCCR
431  * @udc: udc device
432  * @mask: bits to set in UDCCR
433  *
434  * Sets bits in UDCCR, leaving DME and FST bits as they were.
435  */
436 static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask)
437 {
438 	u32 udccr = udc_readl(udc, UDCCR);
439 	udc_writel(udc, UDCCR,
440 			(udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
441 }
442 
443 /**
444  * udc_clear_mask_UDCCR - clears bits in UDCCR
445  * @udc: udc device
446  * @mask: bit to clear in UDCCR
447  *
448  * Clears bits in UDCCR, leaving DME and FST bits as they were.
449  */
450 static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
451 {
452 	u32 udccr = udc_readl(udc, UDCCR);
453 	udc_writel(udc, UDCCR,
454 			(udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
455 }
456 
457 /**
458  * ep_write_UDCCSR - set bits in UDCCSR
459  * @ep: udc endpoint
460  * @mask: bits to set in UDCCSR
461  *
462  * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
463  *
464  * A special case applies to ep0: the ACM bit is always set to 1, for
465  * SET_INTERFACE and SET_CONFIGURATION.
466  */
467 static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
468 {
469 	if (is_ep0(ep))
470 		mask |= UDCCSR0_ACM;
471 	udc_ep_writel(ep, UDCCSR, mask);
472 }
473 
474 /**
475  * ep_count_bytes_remain - get how many bytes remain in the udc endpoint fifo
476  * @ep: udc endpoint
477  *
478  * Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP)
479  */
480 static int ep_count_bytes_remain(struct pxa_ep *ep)
481 {
482 	if (ep->dir_in)
483 		return -EOPNOTSUPP;
484 	return udc_ep_readl(ep, UDCBCR) & 0x3ff;
485 }
486 
487 /**
488  * ep_is_empty - checks if ep has bytes ready for reading
489  * @ep: udc endpoint
490  *
491  * If endpoint is the control endpoint, checks if there are bytes in the
492  * control endpoint fifo. If endpoint is a data endpoint, checks if bytes
493  * are ready for reading on OUT endpoint.
494  *
495  * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint
496  */
497 static int ep_is_empty(struct pxa_ep *ep)
498 {
499 	int ret;
500 
501 	if (!is_ep0(ep) && ep->dir_in)
502 		return -EOPNOTSUPP;
503 	if (is_ep0(ep))
504 		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE);
505 	else
506 		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE);
507 	return ret;
508 }
509 
510 /**
511  * ep_is_full - checks if ep has room to write bytes
512  * @ep: udc endpoint
513  *
514  * If the endpoint is the control endpoint or an IN endpoint, checks whether
515  * there is room to write bytes into the endpoint fifo.
516  *
517  * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint
518  */
519 static int ep_is_full(struct pxa_ep *ep)
520 {
521 	if (is_ep0(ep))
522 		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR);
523 	if (!ep->dir_in)
524 		return -EOPNOTSUPP;
525 	return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF));
526 }
527 
528 /**
529  * epout_has_pkt - checks if OUT endpoint fifo has a packet available
530  * @ep: pxa endpoint
531  *
532  * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep.
533  */
534 static int epout_has_pkt(struct pxa_ep *ep)
535 {
536 	if (!is_ep0(ep) && ep->dir_in)
537 		return -EOPNOTSUPP;
538 	if (is_ep0(ep))
539 		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC);
540 	return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC);
541 }
542 
543 /**
544  * set_ep0state - Set ep0 automata state
545  * @udc: udc device
546  * @state: state
547  */
548 static void set_ep0state(struct pxa_udc *udc, int state)
549 {
550 	struct pxa_ep *ep = &udc->pxa_ep[0];
551 	char *old_stname = EP0_STNAME(udc);
552 
553 	udc->ep0state = state;
554 	ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname,
555 		EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR),
556 		udc_ep_readl(ep, UDCBCR));
557 }
558 
559 /**
560  * ep0_idle - Put control endpoint into idle state
561  * @dev: udc device
562  */
563 static void ep0_idle(struct pxa_udc *dev)
564 {
565 	set_ep0state(dev, WAIT_FOR_SETUP);
566 }
567 
568 /**
569  * inc_ep_stats_reqs - Update ep stats counts
570  * @ep: physical endpoint
572  * @is_in: ep direction (USB_DIR_IN or 0)
573  *
574  */
575 static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in)
576 {
577 	if (is_in)
578 		ep->stats.in_ops++;
579 	else
580 		ep->stats.out_ops++;
581 }
582 
583 /**
584  * inc_ep_stats_bytes - Update ep stats counts
585  * @ep: physical endpoint
586  * @count: bytes transferred on endpoint
587  * @is_in: ep direction (USB_DIR_IN or 0)
588  */
589 static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
590 {
591 	if (is_in)
592 		ep->stats.in_bytes += count;
593 	else
594 		ep->stats.out_bytes += count;
595 }
596 
597 /**
598  * pxa_ep_setup - Sets up a usb physical endpoint
599  * @ep: pxa27x physical endpoint
600  *
601  * Find the physical pxa27x ep, and set up its UDCCR
602  */
603 static void pxa_ep_setup(struct pxa_ep *ep)
604 {
605 	u32 new_udccr;
606 
607 	new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN)
608 		| ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN)
609 		| ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN)
610 		| ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN)
611 		| ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET)
612 		| ((ep->dir_in) ? UDCCONR_ED : 0)
613 		| ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS)
614 		| UDCCONR_EE;
615 
616 	udc_ep_writel(ep, UDCCR, new_udccr);
617 }
618 
619 /**
620  * pxa_eps_setup - Sets up all usb physical endpoints
621  * @dev: udc device
622  *
623  * Set up all pxa physical endpoints, except ep0
624  */
625 static void pxa_eps_setup(struct pxa_udc *dev)
626 {
627 	unsigned int i;
628 
629 	dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev);
630 
631 	for (i = 1; i < NR_PXA_ENDPOINTS; i++)
632 		pxa_ep_setup(&dev->pxa_ep[i]);
633 }
634 
635 /**
636  * pxa_ep_alloc_request - Allocate usb request
637  * @_ep: usb endpoint
638  * @gfp_flags: memory allocation flags
639  *
640  * For the pxa27x, these can just wrap kmalloc/kfree.  gadget drivers
641  * must still pass correctly initialized endpoints, since other controller
642  * drivers may care about how it's currently set up (dma issues etc).
643   */
644 static struct usb_request *
645 pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
646 {
647 	struct pxa27x_request *req;
648 
649 	req = kzalloc(sizeof *req, gfp_flags);
650 	if (!req)
651 		return NULL;
652 
653 	INIT_LIST_HEAD(&req->queue);
654 	req->in_use = 0;
655 	req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
656 
657 	return &req->req;
658 }
659 
660 /**
661  * pxa_ep_free_request - Free usb request
662  * @_ep: usb endpoint
663  * @_req: usb request
664  *
665  * Wrapper around kfree to free _req
666  */
667 static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
668 {
669 	struct pxa27x_request *req;
670 
671 	req = container_of(_req, struct pxa27x_request, req);
672 	WARN_ON(!list_empty(&req->queue));
673 	kfree(req);
674 }
675 
676 /**
677  * ep_add_request - add a request to the endpoint's queue
678  * @ep: usb endpoint
679  * @req: usb request
680  *
681  * Context: ep->lock held
682  *
683  * Queues the request in the endpoint's queue, and enables the interrupts
684  * on the endpoint.
685  */
686 static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req)
687 {
688 	if (unlikely(!req))
689 		return;
690 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
691 		req->req.length, udc_ep_readl(ep, UDCCSR));
692 
693 	req->in_use = 1;
694 	list_add_tail(&req->queue, &ep->queue);
695 	pio_irq_enable(ep);
696 }
697 
698 /**
699  * ep_del_request - removes a request from the endpoint's queue
700  * @ep: usb endpoint
701  * @req: usb request
702  *
703  * Context: ep->lock held
704  *
705  * Unqueue the request from the endpoint's queue. If there are no more requests
706  * on the endpoint, and if it's not the control endpoint, interrupts are
707  * disabled on the endpoint.
708  */
709 static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
710 {
711 	if (unlikely(!req))
712 		return;
713 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
714 		req->req.length, udc_ep_readl(ep, UDCCSR));
715 
716 	list_del_init(&req->queue);
717 	req->in_use = 0;
718 	if (!is_ep0(ep) && list_empty(&ep->queue))
719 		pio_irq_disable(ep);
720 }
721 
722 /**
723  * req_done - Complete a usb request
724  * @ep: pxa physical endpoint
725  * @req: pxa request
726  * @status: usb request status sent to gadget API
727  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
728  *
729  * Context: ep->lock held if flags not NULL, else ep->lock released
730  *
731  * Retire a pxa27x usb request. Endpoint must be locked.
732  */
733 static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
734 	unsigned long *pflags)
735 {
736 	unsigned long	flags;
737 
738 	ep_del_request(ep, req);
739 	if (likely(req->req.status == -EINPROGRESS))
740 		req->req.status = status;
741 	else
742 		status = req->req.status;
743 
744 	if (status && status != -ESHUTDOWN)
745 		ep_dbg(ep, "complete req %p stat %d len %u/%u\n",
746 			&req->req, status,
747 			req->req.actual, req->req.length);
748 
749 	if (pflags)
750 		spin_unlock_irqrestore(&ep->lock, *pflags);
751 	local_irq_save(flags);
752 	usb_gadget_giveback_request(&req->udc_usb_ep->usb_ep, &req->req);
753 	local_irq_restore(flags);
754 	if (pflags)
755 		spin_lock_irqsave(&ep->lock, *pflags);
756 }
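/*
 * Illustrative sketch only: why req_done() drops ep->lock around
 * usb_gadget_giveback_request().  A gadget's ->complete() callback, such as
 * this hypothetical one, may immediately requeue the request with
 * usb_ep_queue(), which re-enters pxa_ep_queue() and takes ep->lock again;
 * holding the lock across the callback could therefore deadlock.  The
 * function name is made up for the example.
 */
static void __maybe_unused example_rx_complete(struct usb_ep *ep,
					       struct usb_request *req)
{
	if (req->status == 0)
		/* reuse the same buffer: requeue straight from the callback */
		usb_ep_queue(ep, req, GFP_ATOMIC);
}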
757 
758 /**
759  * ep_end_out_req - Ends endpoint OUT request
760  * @ep: physical endpoint
761  * @req: pxa request
762  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
763  *
764  * Context: ep->lock held or released (see req_done())
765  *
766  * Ends endpoint OUT request (completes usb request).
767  */
768 static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
769 	unsigned long *pflags)
770 {
771 	inc_ep_stats_reqs(ep, !USB_DIR_IN);
772 	req_done(ep, req, 0, pflags);
773 }
774 
775 /**
776  * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
777  * @ep: physical endpoint
778  * @req: pxa request
779  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
780  *
781  * Context: ep->lock held or released (see req_done())
782  *
783  * Ends control endpoint OUT request (completes usb request), and puts
784  * control endpoint into idle state
785  */
786 static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
787 	unsigned long *pflags)
788 {
789 	set_ep0state(ep->dev, OUT_STATUS_STAGE);
790 	ep_end_out_req(ep, req, pflags);
791 	ep0_idle(ep->dev);
792 }
793 
794 /**
795  * ep_end_in_req - Ends endpoint IN request
796  * @ep: physical endpoint
797  * @req: pxa request
798  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
799  *
800  * Context: ep->lock held or released (see req_done())
801  *
802  * Ends endpoint IN request (completes usb request).
803  */
804 static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
805 	unsigned long *pflags)
806 {
807 	inc_ep_stats_reqs(ep, USB_DIR_IN);
808 	req_done(ep, req, 0, pflags);
809 }
810 
811 /**
812  * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
813  * @ep: physical endpoint
814  * @req: pxa request
815  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
816  *
817  * Context: ep->lock held or released (see req_done())
818  *
819  * Ends control endpoint IN request (completes usb request), and puts
820  * control endpoint into status state
821  */
822 static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
823 	unsigned long *pflags)
824 {
825 	set_ep0state(ep->dev, IN_STATUS_STAGE);
826 	ep_end_in_req(ep, req, pflags);
827 }
828 
829 /**
830  * nuke - Dequeue all requests
831  * @ep: pxa endpoint
832  * @status: usb request status
833  *
834  * Context: ep->lock released
835  *
836  * Dequeues all requests on an endpoint. As a side effect, interrupts will be
837  * disabled on that endpoint (because no more requests).
838  */
839 static void nuke(struct pxa_ep *ep, int status)
840 {
841 	struct pxa27x_request	*req;
842 	unsigned long		flags;
843 
844 	spin_lock_irqsave(&ep->lock, flags);
845 	while (!list_empty(&ep->queue)) {
846 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
847 		req_done(ep, req, status, &flags);
848 	}
849 	spin_unlock_irqrestore(&ep->lock, flags);
850 }
851 
852 /**
853  * read_packet - transfer 1 packet from an OUT endpoint into request
854  * @ep: pxa physical endpoint
855  * @req: usb request
856  *
857  * Takes bytes from the OUT endpoint and transfers them into the usb request.
858  * If there is less space in request than bytes received in OUT endpoint,
859  * bytes are left in the OUT endpoint.
860  *
861  * Returns how many bytes were actually transferred
862  */
863 static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
864 {
865 	u32 *buf;
866 	int bytes_ep, bufferspace, count, i;
867 
868 	bytes_ep = ep_count_bytes_remain(ep);
869 	bufferspace = req->req.length - req->req.actual;
870 
871 	buf = (u32 *)(req->req.buf + req->req.actual);
872 	prefetchw(buf);
873 
874 	if (likely(!ep_is_empty(ep)))
875 		count = min(bytes_ep, bufferspace);
876 	else /* zlp */
877 		count = 0;
878 
879 	for (i = count; i > 0; i -= 4)
880 		*buf++ = udc_ep_readl(ep, UDCDR);
881 	req->req.actual += count;
882 
883 	ep_write_UDCCSR(ep, UDCCSR_PC);
884 
885 	return count;
886 }
887 
888 /**
889  * write_packet - transfer 1 packet from request into an IN endpoint
890  * @ep: pxa physical endpoint
891  * @req: usb request
892  * @max: max bytes that fit into endpoint
893  *
894  * Takes bytes from usb request, and transfers them into the physical
895  * endpoint. If there are no bytes to transfer, doesn't write anything
896  * to physical endpoint.
897  *
898  * Returns how many bytes were actually transferred.
899  */
900 static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req,
901 			unsigned int max)
902 {
903 	int length, count, remain, i;
904 	u32 *buf;
905 	u8 *buf_8;
906 
907 	buf = (u32 *)(req->req.buf + req->req.actual);
908 	prefetch(buf);
909 
910 	length = min(req->req.length - req->req.actual, max);
911 	req->req.actual += length;
912 
913 	remain = length & 0x3;
914 	count = length & ~(0x3);
915 	for (i = count; i > 0 ; i -= 4)
916 		udc_ep_writel(ep, UDCDR, *buf++);
917 
918 	buf_8 = (u8 *)buf;
919 	for (i = remain; i > 0; i--)
920 		udc_ep_writeb(ep, UDCDR, *buf_8++);
921 
922 	ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain,
923 		udc_ep_readl(ep, UDCCSR));
924 
925 	return length;
926 }
927 
928 /**
929  * read_fifo - Transfer packets from OUT endpoint into usb request
930  * @ep: pxa physical endpoint
931  * @req: usb request
932  *
933  * Context: callable when in_interrupt()
934  *
935  * Unload as many packets as possible from the fifo we use for usb OUT
936  * transfers and put them into the request. Caller should have made sure
937  * there's at least one packet ready.
938  * Doesn't complete the request, that's the caller's job
939  *
940  * Returns 1 if the request completed, 0 otherwise
941  */
942 static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
943 {
944 	int count, is_short, completed = 0;
945 
946 	while (epout_has_pkt(ep)) {
947 		count = read_packet(ep, req);
948 		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
949 
950 		is_short = (count < ep->fifo_size);
951 		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
952 			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
953 			&req->req, req->req.actual, req->req.length);
954 
955 		/* completion */
956 		if (is_short || req->req.actual == req->req.length) {
957 			completed = 1;
958 			break;
959 		}
960 		/* finished that packet.  the next one may be waiting... */
961 	}
962 	return completed;
963 }
964 
965 /**
966  * write_fifo - transfer packets from usb request into an IN endpoint
967  * @ep: pxa physical endpoint
968  * @req: pxa usb request
969  *
970  * Write to an IN endpoint fifo, as many packets as possible.
971  * irqs will use this to write the rest later.
972  * caller guarantees at least one packet buffer is ready (or a zlp).
973  * Doesn't complete the request, that's the caller's job
974  *
975  * Returns 1 if request fully transferred, 0 if partial transfer
976  */
977 static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
978 {
979 	unsigned max;
980 	int count, is_short, is_last = 0, completed = 0, totcount = 0;
981 	u32 udccsr;
982 
983 	max = ep->fifo_size;
984 	do {
985 		is_short = 0;
986 
987 		udccsr = udc_ep_readl(ep, UDCCSR);
988 		if (udccsr & UDCCSR_PC) {
989 			ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
990 				udccsr);
991 			ep_write_UDCCSR(ep, UDCCSR_PC);
992 		}
993 		if (udccsr & UDCCSR_TRN) {
994 			ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
995 				udccsr);
996 			ep_write_UDCCSR(ep, UDCCSR_TRN);
997 		}
998 
999 		count = write_packet(ep, req, max);
1000 		inc_ep_stats_bytes(ep, count, USB_DIR_IN);
1001 		totcount += count;
1002 
1003 		/* last packet is usually short (or a zlp) */
1004 		if (unlikely(count < max)) {
1005 			is_last = 1;
1006 			is_short = 1;
1007 		} else {
1008 			if (likely(req->req.length > req->req.actual)
1009 					|| req->req.zero)
1010 				is_last = 0;
1011 			else
1012 				is_last = 1;
1013 			/* interrupt/iso maxpacket may not fill the fifo */
1014 			is_short = unlikely(max < ep->fifo_size);
1015 		}
1016 
1017 		if (is_short)
1018 			ep_write_UDCCSR(ep, UDCCSR_SP);
1019 
1020 		/* requests complete when all IN data is in the FIFO */
1021 		if (is_last) {
1022 			completed = 1;
1023 			break;
1024 		}
1025 	} while (!ep_is_full(ep));
1026 
1027 	ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n",
1028 			totcount, is_last ? "/L" : "", is_short ? "/S" : "",
1029 			req->req.length - req->req.actual, &req->req);
1030 
1031 	return completed;
1032 }
1033 
1034 /**
1035  * read_ep0_fifo - Transfer packets from control endpoint into usb request
1036  * @ep: control endpoint
1037  * @req: pxa usb request
1038  *
1039  * Special ep0 version of the above read_fifo. Reads as many bytes from control
1040  * endpoint as can be read, and stores them into usb request (limited by request
1041  * maximum length).
1042  *
1043  * Returns 0 if usb request only partially filled, 1 if fully filled
1044  */
1045 static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1046 {
1047 	int count, is_short, completed = 0;
1048 
1049 	while (epout_has_pkt(ep)) {
1050 		count = read_packet(ep, req);
1051 		ep_write_UDCCSR(ep, UDCCSR0_OPC);
1052 		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
1053 
1054 		is_short = (count < ep->fifo_size);
1055 		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
1056 			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
1057 			&req->req, req->req.actual, req->req.length);
1058 
1059 		if (is_short || req->req.actual >= req->req.length) {
1060 			completed = 1;
1061 			break;
1062 		}
1063 	}
1064 
1065 	return completed;
1066 }
1067 
1068 /**
1069  * write_ep0_fifo - Send a request to control endpoint (ep0 in)
1070  * @ep: control endpoint
1071  * @req: request
1072  *
1073  * Context: callable when in_interrupt()
1074  *
1075  * Sends a request (or a part of the request) to the control endpoint (ep0 in).
1076  * If the request doesn't fit, the remaining part will be sent from irq.
1077  * The request is considered fully written only if either :
1078  *   - last write transferred all remaining bytes, but fifo was not fully filled
1079  *   - last write was a 0 length write
1080  *
1081  * Returns 1 if request fully written, 0 if request only partially sent
1082  */
1083 static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1084 {
1085 	unsigned	count;
1086 	int		is_last, is_short;
1087 
1088 	count = write_packet(ep, req, EP0_FIFO_SIZE);
1089 	inc_ep_stats_bytes(ep, count, USB_DIR_IN);
1090 
1091 	is_short = (count < EP0_FIFO_SIZE);
1092 	is_last = ((count == 0) || (count < EP0_FIFO_SIZE));
1093 
1094 	/* Sends either a short packet or a 0 length packet */
1095 	if (unlikely(is_short))
1096 		ep_write_UDCCSR(ep, UDCCSR0_IPR);
1097 
1098 	ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
1099 		count, is_short ? "/S" : "", is_last ? "/L" : "",
1100 		req->req.length - req->req.actual,
1101 		&req->req, udc_ep_readl(ep, UDCCSR));
1102 
1103 	return is_last;
1104 }
1105 
1106 /**
1107  * pxa_ep_queue - Queue a request into an endpoint (IN or OUT)
1108  * @_ep: usb endpoint
1109  * @_req: usb request
1110  * @gfp_flags: flags
1111  *
1112  * Context: normally called when !in_interrupt(), but callable when in_interrupt()
1113  * in the special case of ep0 setup:
1114  *   (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue)
1115  *
1116  * Returns 0 if succeeded, error otherwise
1117  */
1118 static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1119 			gfp_t gfp_flags)
1120 {
1121 	struct udc_usb_ep	*udc_usb_ep;
1122 	struct pxa_ep		*ep;
1123 	struct pxa27x_request	*req;
1124 	struct pxa_udc		*dev;
1125 	unsigned long		flags;
1126 	int			rc = 0;
1127 	int			is_first_req;
1128 	unsigned		length;
1129 	int			recursion_detected;
1130 
1131 	req = container_of(_req, struct pxa27x_request, req);
1132 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1133 
1134 	if (unlikely(!_req || !_req->complete || !_req->buf))
1135 		return -EINVAL;
1136 
1137 	if (unlikely(!_ep))
1138 		return -EINVAL;
1139 
1140 	dev = udc_usb_ep->dev;
1141 	ep = udc_usb_ep->pxa_ep;
1142 	if (unlikely(!ep))
1143 		return -EINVAL;
1144 
1145 	dev = ep->dev;
1146 	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
1147 		ep_dbg(ep, "bogus device state\n");
1148 		return -ESHUTDOWN;
1149 	}
1150 
1151 	/* iso is always one packet per request, that's the only way
1152 	 * we can report per-packet status.  that also helps with dma.
1153 	 */
1154 	if (unlikely(EPXFERTYPE_is_ISO(ep)
1155 			&& req->req.length > ep->fifo_size))
1156 		return -EMSGSIZE;
1157 
1158 	spin_lock_irqsave(&ep->lock, flags);
1159 	recursion_detected = ep->in_handle_ep;
1160 
1161 	is_first_req = list_empty(&ep->queue);
1162 	ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
1163 			_req, is_first_req ? "yes" : "no",
1164 			_req->length, _req->buf);
1165 
1166 	if (!ep->enabled) {
1167 		_req->status = -ESHUTDOWN;
1168 		rc = -ESHUTDOWN;
1169 		goto out_locked;
1170 	}
1171 
1172 	if (req->in_use) {
1173 		ep_err(ep, "refusing to queue req %p (already queued)\n", req);
1174 		goto out_locked;
1175 	}
1176 
1177 	length = _req->length;
1178 	_req->status = -EINPROGRESS;
1179 	_req->actual = 0;
1180 
1181 	ep_add_request(ep, req);
1182 	spin_unlock_irqrestore(&ep->lock, flags);
1183 
1184 	if (is_ep0(ep)) {
1185 		switch (dev->ep0state) {
1186 		case WAIT_ACK_SET_CONF_INTERF:
1187 			if (length == 0) {
1188 				ep_end_in_req(ep, req, NULL);
1189 			} else {
1190 				ep_err(ep, "got a request of %d bytes while "
1191 					"in state WAIT_ACK_SET_CONF_INTERF\n",
1192 					length);
1193 				ep_del_request(ep, req);
1194 				rc = -EL2HLT;
1195 			}
1196 			ep0_idle(ep->dev);
1197 			break;
1198 		case IN_DATA_STAGE:
1199 			if (!ep_is_full(ep))
1200 				if (write_ep0_fifo(ep, req))
1201 					ep0_end_in_req(ep, req, NULL);
1202 			break;
1203 		case OUT_DATA_STAGE:
1204 			if ((length == 0) || !epout_has_pkt(ep))
1205 				if (read_ep0_fifo(ep, req))
1206 					ep0_end_out_req(ep, req, NULL);
1207 			break;
1208 		default:
1209 			ep_err(ep, "odd state %s to send me a request\n",
1210 				EP0_STNAME(ep->dev));
1211 			ep_del_request(ep, req);
1212 			rc = -EL2HLT;
1213 			break;
1214 		}
1215 	} else {
1216 		if (!recursion_detected)
1217 			handle_ep(ep);
1218 	}
1219 
1220 out:
1221 	return rc;
1222 out_locked:
1223 	spin_unlock_irqrestore(&ep->lock, flags);
1224 	goto out;
1225 }
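/*
 * Illustrative sketch only: how a gadget driver typically reaches
 * pxa_ep_queue().  It never calls the pxa_* functions directly; it uses the
 * generic usb_ep_*() wrappers, which dispatch through pxa_ep_ops.  The
 * helper name below is invented and error handling is kept minimal.
 */
static int __maybe_unused example_submit(struct usb_ep *ep, void *buf,
		unsigned length,
		void (*done)(struct usb_ep *, struct usb_request *))
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);	/* -> pxa_ep_alloc_request */
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = length;
	req->complete = done;			/* called back from req_done() */

	return usb_ep_queue(ep, req, GFP_ATOMIC);	/* -> pxa_ep_queue */
}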
1226 
1227 /**
1228  * pxa_ep_dequeue - Dequeue one request
1229  * @_ep: usb endpoint
1230  * @_req: usb request
1231  *
1232  * Return 0 if no error, -EINVAL or -ECONNRESET otherwise
1233  */
1234 static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1235 {
1236 	struct pxa_ep		*ep;
1237 	struct udc_usb_ep	*udc_usb_ep;
1238 	struct pxa27x_request	*req;
1239 	unsigned long		flags;
1240 	int			rc = -EINVAL;
1241 
1242 	if (!_ep)
1243 		return rc;
1244 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1245 	ep = udc_usb_ep->pxa_ep;
1246 	if (!ep || is_ep0(ep))
1247 		return rc;
1248 
1249 	spin_lock_irqsave(&ep->lock, flags);
1250 
1251 	/* make sure it's actually queued on this endpoint */
1252 	list_for_each_entry(req, &ep->queue, queue) {
1253 		if (&req->req == _req) {
1254 			rc = 0;
1255 			break;
1256 		}
1257 	}
1258 
1259 	spin_unlock_irqrestore(&ep->lock, flags);
1260 	if (!rc)
1261 		req_done(ep, req, -ECONNRESET, NULL);
1262 	return rc;
1263 }
1264 
1265 /**
1266  * pxa_ep_set_halt - Halts operations on one endpoint
1267  * @_ep: usb endpoint
1268  * @value: 0 to clear halt, 1 to set halt
1269  *
1270  * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise
1271  */
1272 static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
1273 {
1274 	struct pxa_ep		*ep;
1275 	struct udc_usb_ep	*udc_usb_ep;
1276 	unsigned long flags;
1277 	int rc;
1278 
1279 
1280 	if (!_ep)
1281 		return -EINVAL;
1282 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1283 	ep = udc_usb_ep->pxa_ep;
1284 	if (!ep || is_ep0(ep))
1285 		return -EINVAL;
1286 
1287 	if (value == 0) {
1288 		/*
1289 		 * This path (reset toggle+halt) is needed to implement
1290 		 * SET_INTERFACE on normal hardware.  but it can't be
1291 		 * done from software on the PXA UDC, and the hardware
1292 		 * forgets to do it as part of SET_INTERFACE automagic.
1293 		 */
1294 		ep_dbg(ep, "only host can clear halt\n");
1295 		return -EROFS;
1296 	}
1297 
1298 	spin_lock_irqsave(&ep->lock, flags);
1299 
1300 	rc = -EAGAIN;
1301 	if (ep->dir_in	&& (ep_is_full(ep) || !list_empty(&ep->queue)))
1302 		goto out;
1303 
1304 	/* FST, FEF bits are the same for control and non control endpoints */
1305 	rc = 0;
1306 	ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
1307 	if (is_ep0(ep))
1308 		set_ep0state(ep->dev, STALL);
1309 
1310 out:
1311 	spin_unlock_irqrestore(&ep->lock, flags);
1312 	return rc;
1313 }
1314 
1315 /**
1316  * pxa_ep_fifo_status - Get how many bytes in physical endpoint
1317  * @_ep: usb endpoint
1318  *
1319  * Returns number of bytes in OUT fifos. Broken for IN fifos.
1320  */
1321 static int pxa_ep_fifo_status(struct usb_ep *_ep)
1322 {
1323 	struct pxa_ep		*ep;
1324 	struct udc_usb_ep	*udc_usb_ep;
1325 
1326 	if (!_ep)
1327 		return -ENODEV;
1328 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1329 	ep = udc_usb_ep->pxa_ep;
1330 	if (!ep || is_ep0(ep))
1331 		return -ENODEV;
1332 
1333 	if (ep->dir_in)
1334 		return -EOPNOTSUPP;
1335 	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep))
1336 		return 0;
1337 	else
1338 		return ep_count_bytes_remain(ep) + 1;
1339 }
1340 
1341 /**
1342  * pxa_ep_fifo_flush - Flushes one endpoint
1343  * @_ep: usb endpoint
1344  *
1345  * Discards all data in one endpoint (IN or OUT), except the control endpoint.
1346  */
1347 static void pxa_ep_fifo_flush(struct usb_ep *_ep)
1348 {
1349 	struct pxa_ep		*ep;
1350 	struct udc_usb_ep	*udc_usb_ep;
1351 	unsigned long		flags;
1352 
1353 	if (!_ep)
1354 		return;
1355 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1356 	ep = udc_usb_ep->pxa_ep;
1357 	if (!ep || is_ep0(ep))
1358 		return;
1359 
1360 	spin_lock_irqsave(&ep->lock, flags);
1361 
1362 	if (unlikely(!list_empty(&ep->queue)))
1363 		ep_dbg(ep, "called while queue list not empty\n");
1364 	ep_dbg(ep, "called\n");
1365 
1366 	/* for OUT, just read and discard the FIFO contents. */
1367 	if (!ep->dir_in) {
1368 		while (!ep_is_empty(ep))
1369 			udc_ep_readl(ep, UDCDR);
1370 	} else {
1371 		/* most IN status is the same, but ISO can't stall */
1372 		ep_write_UDCCSR(ep,
1373 				UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
1374 				| (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
1375 	}
1376 
1377 	spin_unlock_irqrestore(&ep->lock, flags);
1378 }
1379 
1380 /**
1381  * pxa_ep_enable - Enables usb endpoint
1382  * @_ep: usb endpoint
1383  * @desc: usb endpoint descriptor
1384  *
1385  * Nothing much to do here, as ep configuration is done once and for all
1386  * before udc is enabled. After udc enable, no physical endpoint configuration
1387  * can be changed.
1388  * Function makes sanity checks and flushes the endpoint.
1389  */
1390 static int pxa_ep_enable(struct usb_ep *_ep,
1391 	const struct usb_endpoint_descriptor *desc)
1392 {
1393 	struct pxa_ep		*ep;
1394 	struct udc_usb_ep	*udc_usb_ep;
1395 	struct pxa_udc		*udc;
1396 
1397 	if (!_ep || !desc)
1398 		return -EINVAL;
1399 
1400 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1401 	if (udc_usb_ep->pxa_ep) {
1402 		ep = udc_usb_ep->pxa_ep;
1403 		ep_warn(ep, "usb_ep %s already enabled, doing nothing\n",
1404 			_ep->name);
1405 	} else {
1406 		ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep);
1407 	}
1408 
1409 	if (!ep || is_ep0(ep)) {
1410 		dev_err(udc_usb_ep->dev->dev,
1411 			"unable to match pxa_ep for ep %s\n",
1412 			_ep->name);
1413 		return -EINVAL;
1414 	}
1415 
1416 	if ((desc->bDescriptorType != USB_DT_ENDPOINT)
1417 			|| (ep->type != usb_endpoint_type(desc))) {
1418 		ep_err(ep, "type mismatch\n");
1419 		return -EINVAL;
1420 	}
1421 
1422 	if (ep->fifo_size < usb_endpoint_maxp(desc)) {
1423 		ep_err(ep, "bad maxpacket\n");
1424 		return -ERANGE;
1425 	}
1426 
1427 	udc_usb_ep->pxa_ep = ep;
1428 	udc = ep->dev;
1429 
1430 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
1431 		ep_err(ep, "bogus device state\n");
1432 		return -ESHUTDOWN;
1433 	}
1434 
1435 	ep->enabled = 1;
1436 
1437 	/* flush fifo (mostly for OUT buffers) */
1438 	pxa_ep_fifo_flush(_ep);
1439 
1440 	ep_dbg(ep, "enabled\n");
1441 	return 0;
1442 }
1443 
1444 /**
1445  * pxa_ep_disable - Disable usb endpoint
1446  * @_ep: usb endpoint
1447  *
1448  * Same as for pxa_ep_enable, no physical endpoint configuration can be
1449  * changed.
1450  * Function flushes the endpoint and related requests.
1451  */
1452 static int pxa_ep_disable(struct usb_ep *_ep)
1453 {
1454 	struct pxa_ep		*ep;
1455 	struct udc_usb_ep	*udc_usb_ep;
1456 
1457 	if (!_ep)
1458 		return -EINVAL;
1459 
1460 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1461 	ep = udc_usb_ep->pxa_ep;
1462 	if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
1463 		return -EINVAL;
1464 
1465 	ep->enabled = 0;
1466 	nuke(ep, -ESHUTDOWN);
1467 
1468 	pxa_ep_fifo_flush(_ep);
1469 	udc_usb_ep->pxa_ep = NULL;
1470 
1471 	ep_dbg(ep, "disabled\n");
1472 	return 0;
1473 }
1474 
1475 static struct usb_ep_ops pxa_ep_ops = {
1476 	.enable		= pxa_ep_enable,
1477 	.disable	= pxa_ep_disable,
1478 
1479 	.alloc_request	= pxa_ep_alloc_request,
1480 	.free_request	= pxa_ep_free_request,
1481 
1482 	.queue		= pxa_ep_queue,
1483 	.dequeue	= pxa_ep_dequeue,
1484 
1485 	.set_halt	= pxa_ep_set_halt,
1486 	.fifo_status	= pxa_ep_fifo_status,
1487 	.fifo_flush	= pxa_ep_fifo_flush,
1488 };
1489 
1490 /**
1491  * dplus_pullup - Connect or disconnect pullup resistor to D+ pin
1492  * @udc: udc device
1493  * @on: 0 to disconnect the pullup resistor, 1 to connect it
1494  * Context: any
1495  *
1496  * Handle D+ pullup resistor, make the device visible to the usb bus, and
1497  * declare it as a full speed usb device
1498  */
1499 static void dplus_pullup(struct pxa_udc *udc, int on)
1500 {
1501 	if (udc->gpiod) {
1502 		gpiod_set_value(udc->gpiod, on);
1503 	} else if (udc->udc_command) {
1504 		if (on)
1505 			udc->udc_command(PXA2XX_UDC_CMD_CONNECT);
1506 		else
1507 			udc->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
1508 	}
1509 	udc->pullup_on = on;
1510 }
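/*
 * Illustrative sketch only: where udc_command typically comes from.  Legacy
 * (non device-tree) boards pass platform data (struct pxa2xx_udc_mach_info,
 * see <linux/platform_data/pxa2xx_udc.h> for the exact fields) with a
 * udc_command() hook that drives whatever board-specific logic gates the D+
 * pullup.  The GPIO number and function name below are invented for the
 * example.
 */
#define EXAMPLE_GPIO_USB_PULLUP	34	/* made-up GPIO number */

static void __maybe_unused example_board_udc_command(int cmd)
{
	switch (cmd) {
	case PXA2XX_UDC_CMD_CONNECT:
		gpio_set_value(EXAMPLE_GPIO_USB_PULLUP, 1);
		break;
	case PXA2XX_UDC_CMD_DISCONNECT:
		gpio_set_value(EXAMPLE_GPIO_USB_PULLUP, 0);
		break;
	}
}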
1511 
1512 /**
1513  * pxa_udc_get_frame - Returns usb frame number
1514  * @_gadget: usb gadget
1515  */
1516 static int pxa_udc_get_frame(struct usb_gadget *_gadget)
1517 {
1518 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1519 
1520 	return (udc_readl(udc, UDCFNR) & 0x7ff);
1521 }
1522 
1523 /**
1524  * pxa_udc_wakeup - Force udc device out of suspend
1525  * @_gadget: usb gadget
1526  *
1527  * Returns 0 if successful, error code otherwise
1528  */
1529 static int pxa_udc_wakeup(struct usb_gadget *_gadget)
1530 {
1531 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1532 
1533 	/* host may not have enabled remote wakeup */
1534 	if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0)
1535 		return -EHOSTUNREACH;
1536 	udc_set_mask_UDCCR(udc, UDCCR_UDR);
1537 	return 0;
1538 }
1539 
1540 static void udc_enable(struct pxa_udc *udc);
1541 static void udc_disable(struct pxa_udc *udc);
1542 
1543 /**
1544  * should_enable_udc - Tells if UDC should be enabled
1545  * @udc: udc device
1546  * Context: any
1547  *
1548  * The UDC should be enabled if:
1549  *
1550  *  - the pullup resistor is connected
1551  *  - and a gadget driver is bound
1552  *  - and vbus is sensed (or no vbus sense is available)
1553  *
1554  * Returns 1 if UDC should be enabled, 0 otherwise
1555  */
1556 static int should_enable_udc(struct pxa_udc *udc)
1557 {
1558 	int put_on;
1559 
1560 	put_on = ((udc->pullup_on) && (udc->driver));
1561 	put_on &= ((udc->vbus_sensed) || (IS_ERR_OR_NULL(udc->transceiver)));
1562 	return put_on;
1563 }
1564 
1565 /**
1566  * should_disable_udc - Tells if UDC should be disabled
1567  * @udc: udc device
1568  * Context: any
1569  *
1570  * The UDC should be disabled if:
1571  *  - the pullup resistor is not connected
1572  *  - or no gadget driver is bound
1573  *  - or no vbus is sensed (when vbus sensing is available)
1574  *
1575  * Returns 1 if UDC should be disabled
1576  */
1577 static int should_disable_udc(struct pxa_udc *udc)
1578 {
1579 	int put_off;
1580 
1581 	put_off = ((!udc->pullup_on) || (!udc->driver));
1582 	put_off |= ((!udc->vbus_sensed) && (!IS_ERR_OR_NULL(udc->transceiver)));
1583 	return put_off;
1584 }
1585 
1586 /**
1587  * pxa_udc_pullup - Offer manual D+ pullup control
1588  * @_gadget: usb gadget using the control
1589  * @is_active: 0 if disconnect, else connect D+ pullup resistor
1590  * Context: !in_interrupt()
1591  *
1592  * Returns 0 if OK, -EOPNOTSUPP if udc driver doesn't handle D+ pullup
1593  */
1594 static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active)
1595 {
1596 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1597 
1598 	if (!udc->gpiod && !udc->udc_command)
1599 		return -EOPNOTSUPP;
1600 
1601 	dplus_pullup(udc, is_active);
1602 
1603 	if (should_enable_udc(udc))
1604 		udc_enable(udc);
1605 	if (should_disable_udc(udc))
1606 		udc_disable(udc);
1607 	return 0;
1608 }
1609 
1610 static void udc_enable(struct pxa_udc *udc);
1611 static void udc_disable(struct pxa_udc *udc);
1612 
1613 /**
1614  * pxa_udc_vbus_session - Called by external transceiver to enable/disable udc
1615  * @_gadget: usb gadget
1616  * @is_active: 0 if should disable the udc, 1 if should enable
1617  *
1618  * Enables the udc, and optionally activates the D+ pullup resistor. Or disables
1619  * the udc, and deactivates the D+ pullup resistor.
1620  *
1621  * Returns 0
1622  */
1623 static int pxa_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1624 {
1625 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1626 
1627 	udc->vbus_sensed = is_active;
1628 	if (should_enable_udc(udc))
1629 		udc_enable(udc);
1630 	if (should_disable_udc(udc))
1631 		udc_disable(udc);
1632 
1633 	return 0;
1634 }
1635 
1636 /**
1637  * pxa_udc_vbus_draw - Called by gadget driver after SET_CONFIGURATION completed
1638  * @_gadget: usb gadget
1639  * @mA: current drawn
1640  *
1641  * Context: !in_interrupt()
1642  *
1643  * Called after a configuration was chosen by a USB host, to inform how much
1644  * current can be drawn by the device from VBus line.
1645  *
1646  * Returns 0 or -EOPNOTSUPP if no transceiver is handling the udc
1647  */
1648 static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1649 {
1650 	struct pxa_udc *udc;
1651 
1652 	udc = to_gadget_udc(_gadget);
1653 	if (!IS_ERR_OR_NULL(udc->transceiver))
1654 		return usb_phy_set_power(udc->transceiver, mA);
1655 	return -EOPNOTSUPP;
1656 }
1657 
1658 static int pxa27x_udc_start(struct usb_gadget *g,
1659 		struct usb_gadget_driver *driver);
1660 static int pxa27x_udc_stop(struct usb_gadget *g);
1661 
1662 static const struct usb_gadget_ops pxa_udc_ops = {
1663 	.get_frame	= pxa_udc_get_frame,
1664 	.wakeup		= pxa_udc_wakeup,
1665 	.pullup		= pxa_udc_pullup,
1666 	.vbus_session	= pxa_udc_vbus_session,
1667 	.vbus_draw	= pxa_udc_vbus_draw,
1668 	.udc_start	= pxa27x_udc_start,
1669 	.udc_stop	= pxa27x_udc_stop,
1670 };
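/*
 * Illustrative sketch only: gadget drivers do not call pxa_udc_ops directly.
 * The gadget core provides wrappers that dispatch through this table, e.g.
 * usb_gadget_connect()/usb_gadget_disconnect() end up in .pullup, and
 * usb_gadget_wakeup() in .wakeup.  The helper below is invented for the
 * example.
 */
static void __maybe_unused example_soft_connect(struct usb_gadget *gadget,
						int connect)
{
	if (connect)
		usb_gadget_connect(gadget);	/* -> pxa_udc_pullup(gadget, 1) */
	else
		usb_gadget_disconnect(gadget);	/* -> pxa_udc_pullup(gadget, 0) */
}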
1671 
1672 /**
1673  * udc_disable - disable udc device controller
1674  * @udc: udc device
1675  * Context: any
1676  *
1677  * Disables the udc device: disables clocks, udc interrupts, control endpoint
1678  * interrupts.
1679  */
1680 static void udc_disable(struct pxa_udc *udc)
1681 {
1682 	if (!udc->enabled)
1683 		return;
1684 
1685 	udc_writel(udc, UDCICR0, 0);
1686 	udc_writel(udc, UDCICR1, 0);
1687 
1688 	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1689 
1690 	ep0_idle(udc);
1691 	udc->gadget.speed = USB_SPEED_UNKNOWN;
1692 	clk_disable(udc->clk);
1693 
1694 	udc->enabled = 0;
1695 }
1696 
1697 /**
1698  * udc_init_data - Initialize udc device data structures
1699  * @dev: udc device
1700  *
1701  * Initializes gadget endpoint list, endpoints locks. No action is taken
1702  * on the hardware.
1703  */
1704 static void udc_init_data(struct pxa_udc *dev)
1705 {
1706 	int i;
1707 	struct pxa_ep *ep;
1708 
1709 	/* device/ep0 records init */
1710 	INIT_LIST_HEAD(&dev->gadget.ep_list);
1711 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1712 	dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
1713 	dev->gadget.quirk_altset_not_supp = 1;
1714 	ep0_idle(dev);
1715 
1716 	/* PXA endpoints init */
1717 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
1718 		ep = &dev->pxa_ep[i];
1719 
1720 		ep->enabled = is_ep0(ep);
1721 		INIT_LIST_HEAD(&ep->queue);
1722 		spin_lock_init(&ep->lock);
1723 	}
1724 
1725 	/* USB endpoints init */
1726 	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
1727 		list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list,
1728 				&dev->gadget.ep_list);
1729 		usb_ep_set_maxpacket_limit(&dev->udc_usb_ep[i].usb_ep,
1730 					   dev->udc_usb_ep[i].usb_ep.maxpacket);
1731 	}
1732 }
1733 
1734 /**
1735  * udc_enable - Enables the udc device
1736  * @dev: udc device
1737  *
1738  * Enables the udc device: enables clocks, udc interrupts, control endpoint
1739  * interrupts, sets usb as UDC client and sets up endpoints.
1740  */
1741 static void udc_enable(struct pxa_udc *udc)
1742 {
1743 	if (udc->enabled)
1744 		return;
1745 
1746 	clk_enable(udc->clk);
1747 	udc_writel(udc, UDCICR0, 0);
1748 	udc_writel(udc, UDCICR1, 0);
1749 	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1750 
1751 	ep0_idle(udc);
1752 	udc->gadget.speed = USB_SPEED_FULL;
1753 	memset(&udc->stats, 0, sizeof(udc->stats));
1754 
1755 	pxa_eps_setup(udc);
1756 	udc_set_mask_UDCCR(udc, UDCCR_UDE);
1757 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
1758 	udelay(2);
1759 	if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
1760 		dev_err(udc->dev, "Configuration errors, udc disabled\n");
1761 
1762 	/*
1763 	 * Caller must be able to sleep in order to cope with startup transients
1764 	 */
1765 	msleep(100);
1766 
1767 	/* enable suspend/resume and reset irqs */
1768 	udc_writel(udc, UDCICR1,
1769 			UDCICR1_IECC | UDCICR1_IERU
1770 			| UDCICR1_IESU | UDCICR1_IERS);
1771 
1772 	/* enable ep0 irqs */
1773 	pio_irq_enable(&udc->pxa_ep[0]);
1774 
1775 	udc->enabled = 1;
1776 }
1777 
1778 /**
1779  * pxa27x_udc_start - Register gadget driver
1780  * @g: usb gadget
1781  * @driver: gadget driver
1782  *
1783  * When a driver is successfully registered, it will receive control requests
1784  * including set_configuration(), which enables non-control requests.  Then
1785  * usb traffic follows until a disconnect is reported.  Then a host may connect
1786  * again, or the driver might get unbound.
1787  *
1788  * Note that the udc is not automatically enabled. Check function
1789  * should_enable_udc().
1790  *
1791  * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
1792  */
1793 static int pxa27x_udc_start(struct usb_gadget *g,
1794 		struct usb_gadget_driver *driver)
1795 {
1796 	struct pxa_udc *udc = to_pxa(g);
1797 	int retval;
1798 
1799 	/* first hook up the driver ... */
1800 	udc->driver = driver;
1801 
1802 	if (!IS_ERR_OR_NULL(udc->transceiver)) {
1803 		retval = otg_set_peripheral(udc->transceiver->otg,
1804 						&udc->gadget);
1805 		if (retval) {
1806 			dev_err(udc->dev, "can't bind to transceiver\n");
1807 			goto fail;
1808 		}
1809 	}
1810 
1811 	if (should_enable_udc(udc))
1812 		udc_enable(udc);
1813 	return 0;
1814 
1815 fail:
1816 	udc->driver = NULL;
1817 	return retval;
1818 }
1819 
1820 /**
1821  * stop_activity - Stops udc endpoints
1822  * @udc: udc device
1823  * @driver: gadget driver
1824  *
1825  * Disables all udc endpoints (even the control endpoint), and reports disconnect to
1826  * the gadget user.
1827  */
1828 static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
1829 {
1830 	int i;
1831 
1832 	/* don't disconnect drivers more than once */
1833 	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
1834 		driver = NULL;
1835 	udc->gadget.speed = USB_SPEED_UNKNOWN;
1836 
1837 	for (i = 0; i < NR_USB_ENDPOINTS; i++)
1838 		pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep);
1839 }
1840 
1841 /**
1842  * pxa27x_udc_stop - Unregister the gadget driver
1843  * @g: usb gadget
1844  *
1845  * Returns 0 if no error, -ENODEV, -EINVAL otherwise
1846  */
1847 static int pxa27x_udc_stop(struct usb_gadget *g)
1848 {
1849 	struct pxa_udc *udc = to_pxa(g);
1850 
1851 	stop_activity(udc, NULL);
1852 	udc_disable(udc);
1853 
1854 	udc->driver = NULL;
1855 
1856 	if (!IS_ERR_OR_NULL(udc->transceiver))
1857 		return otg_set_peripheral(udc->transceiver->otg, NULL);
1858 	return 0;
1859 }
1860 
1861 /**
1862  * handle_ep0_ctrl_req - handle control endpoint control request
1863  * @udc: udc device
1864  * @req: control request
1865  */
1866 static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1867 				struct pxa27x_request *req)
1868 {
1869 	struct pxa_ep *ep = &udc->pxa_ep[0];
1870 	union {
1871 		struct usb_ctrlrequest	r;
1872 		u32			word[2];
1873 	} u;
1874 	int i;
1875 	int have_extrabytes = 0;
1876 	unsigned long flags;
1877 
1878 	nuke(ep, -EPROTO);
1879 	spin_lock_irqsave(&ep->lock, flags);
1880 
1881 	/*
	 * The PXA320 manual describes this situation in its section on
	 * back-to-back setup packets.  The solution is to set OPC to get rid
	 * of the status packet, and then continue with the setup packet.
	 * Generalize this to pxa27x CPUs.
1886 	 */
1887 	if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
1888 		ep_write_UDCCSR(ep, UDCCSR0_OPC);
1889 
1890 	/* read SETUP packet */
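	/* a SETUP packet is exactly 8 bytes, hence two 32-bit reads from UDCDR */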
1891 	for (i = 0; i < 2; i++) {
1892 		if (unlikely(ep_is_empty(ep)))
1893 			goto stall;
1894 		u.word[i] = udc_ep_readl(ep, UDCDR);
1895 	}
1896 
1897 	have_extrabytes = !ep_is_empty(ep);
1898 	while (!ep_is_empty(ep)) {
1899 		i = udc_ep_readl(ep, UDCDR);
1900 		ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
1901 	}
1902 
1903 	ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1904 		u.r.bRequestType, u.r.bRequest,
1905 		le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex),
1906 		le16_to_cpu(u.r.wLength));
1907 	if (unlikely(have_extrabytes))
1908 		goto stall;
1909 
1910 	if (u.r.bRequestType & USB_DIR_IN)
1911 		set_ep0state(udc, IN_DATA_STAGE);
1912 	else
1913 		set_ep0state(udc, OUT_DATA_STAGE);
1914 
1915 	/* Tell UDC to enter Data Stage */
1916 	ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
1917 
1918 	spin_unlock_irqrestore(&ep->lock, flags);
1919 	i = udc->driver->setup(&udc->gadget, &u.r);
1920 	spin_lock_irqsave(&ep->lock, flags);
1921 	if (i < 0)
1922 		goto stall;
1923 out:
1924 	spin_unlock_irqrestore(&ep->lock, flags);
1925 	return;
1926 stall:
1927 	ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
1928 		udc_ep_readl(ep, UDCCSR), i);
1929 	ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
1930 	set_ep0state(udc, STALL);
1931 	goto out;
1932 }
1933 
1934 /**
1935  * handle_ep0 - Handle control endpoint data transfers
1936  * @udc: udc device
1937  * @fifo_irq: 1 if triggered by fifo service type irq
1938  * @opc_irq: 1 if triggered by output packet complete type irq
1939  *
1940  * Context : when in_interrupt() or with ep->lock held
1941  *
1942  * Tries to transfer all pending request data into the endpoint and/or
1943  * transfer all pending data in the endpoint into usb requests.
1944  * Handles states of ep0 automata.
1945  *
1946  * PXA27x hardware handles several standard usb control requests without
1947  * driver notification.  The requests fully handled by hardware are :
1948  *  SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
1949  *  GET_STATUS
1950  * The requests handled by hardware, but with irq notification are :
1951  *  SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
1952  * The remaining standard requests really handled by handle_ep0 are :
1953  *  GET_DESCRIPTOR, SET_DESCRIPTOR, specific requests.
1954  * Requests standardized outside of USB 2.0 chapter 9 are handled more
 * uniformly by gadget drivers.
1956  *
 * The control endpoint state machine is _not_ USB spec compliant; it is hardly
 * even compliant with the Intel PXA270 developers guide.
 * The key points that shaped this state machine are :
1960  *   - on every setup token, bit UDCCSR0_SA is raised and held until cleared by
1961  *     software.
1962  *   - on every OUT packet received, UDCCSR0_OPC is raised and held until
1963  *     cleared by software.
1964  *   - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
1965  *     before reading ep0.
1966  *     This is true only for PXA27x. This is not true anymore for PXA3xx family
1967  *     (check Back-to-Back setup packet in developers guide).
 *   - the irq handler can be invoked for a "packet complete" event (opc_irq=1)
 *     while UDCCSR0_OPC is not yet raised (the delta can be as large as 100ms,
 *     from experimentation).
 *   - as UDCCSR0_SA can be activated while in irq handling, and clearing
 *     UDCCSR0_OPC would flush the setup data, we almost never clear UDCCSR0_OPC
 *     => we never actually read the "status stage" packet of an IN data stage
 *     => this behaviour is not covered by Intel's documentation
 *   - the hardware has no notion of a STATUS STAGE, it only handles the SETUP
 *     STAGE and DATA STAGE. The driver adds a STATUS STAGE to send the last
 *     zero length packet in OUT_STATUS_STAGE.
1978  *   - special attention was needed for IN_STATUS_STAGE. If a packet complete
 *     event is detected, we terminate the status stage without acknowledging
 *     the packet (so as not to risk losing a potential SETUP packet)
1981  */
1982 static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
1983 {
1984 	u32			udccsr0;
1985 	struct pxa_ep		*ep = &udc->pxa_ep[0];
1986 	struct pxa27x_request	*req = NULL;
1987 	int			completed = 0;
1988 
1989 	if (!list_empty(&ep->queue))
1990 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
1991 
1992 	udccsr0 = udc_ep_readl(ep, UDCCSR);
1993 	ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
1994 		EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
1995 		(fifo_irq << 1 | opc_irq));
1996 
1997 	if (udccsr0 & UDCCSR0_SST) {
1998 		ep_dbg(ep, "clearing stall status\n");
1999 		nuke(ep, -EPIPE);
2000 		ep_write_UDCCSR(ep, UDCCSR0_SST);
2001 		ep0_idle(udc);
2002 	}
2003 
2004 	if (udccsr0 & UDCCSR0_SA) {
2005 		nuke(ep, 0);
2006 		set_ep0state(udc, SETUP_STAGE);
2007 	}
2008 
2009 	switch (udc->ep0state) {
2010 	case WAIT_FOR_SETUP:
2011 		/*
2012 		 * Hardware bug : beware, we cannot clear OPC, since we would
2013 		 * miss a potential OPC irq for a setup packet.
2014 		 * So, we only do ... nothing, and hope for a next irq with
2015 		 * UDCCSR0_SA set.
2016 		 */
2017 		break;
2018 	case SETUP_STAGE:
2019 		udccsr0 &= UDCCSR0_CTRL_REQ_MASK;
2020 		if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK))
2021 			handle_ep0_ctrl_req(udc, req);
2022 		break;
2023 	case IN_DATA_STAGE:			/* GET_DESCRIPTOR */
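		/*
		 * An OUT packet arriving during the IN data stage is the
		 * host's status packet: clearing OPC discards it, as it is
		 * never read (see the note above).
		 */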
2024 		if (epout_has_pkt(ep))
2025 			ep_write_UDCCSR(ep, UDCCSR0_OPC);
2026 		if (req && !ep_is_full(ep))
2027 			completed = write_ep0_fifo(ep, req);
2028 		if (completed)
2029 			ep0_end_in_req(ep, req, NULL);
2030 		break;
2031 	case OUT_DATA_STAGE:			/* SET_DESCRIPTOR */
2032 		if (epout_has_pkt(ep) && req)
2033 			completed = read_ep0_fifo(ep, req);
2034 		if (completed)
2035 			ep0_end_out_req(ep, req, NULL);
2036 		break;
2037 	case STALL:
2038 		ep_write_UDCCSR(ep, UDCCSR0_FST);
2039 		break;
2040 	case IN_STATUS_STAGE:
2041 		/*
2042 		 * Hardware bug : beware, we cannot clear OPC, since we would
2043 		 * miss a potential PC irq for a setup packet.
2044 		 * So, we only put the ep0 into WAIT_FOR_SETUP state.
2045 		 */
2046 		if (opc_irq)
2047 			ep0_idle(udc);
2048 		break;
2049 	case OUT_STATUS_STAGE:
2050 	case WAIT_ACK_SET_CONF_INTERF:
2051 		ep_warn(ep, "should never get in %s state here!!!\n",
2052 				EP0_STNAME(ep->dev));
2053 		ep0_idle(udc);
2054 		break;
2055 	}
2056 }
2057 
2058 /**
 * handle_ep - Handle endpoint data transfers
2060  * @ep: pxa physical endpoint
2061  *
2062  * Tries to transfer all pending request data into the endpoint and/or
2063  * transfer all pending data in the endpoint into usb requests.
2064  *
2065  * Is always called when in_interrupt() and with ep->lock released.
2066  */
2067 static void handle_ep(struct pxa_ep *ep)
2068 {
2069 	struct pxa27x_request	*req;
2070 	int completed;
2071 	u32 udccsr;
2072 	int is_in = ep->dir_in;
2073 	int loop = 0;
2074 	unsigned long		flags;
2075 
2076 	spin_lock_irqsave(&ep->lock, flags);
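	/*
	 * Completing a request below can call back into the gadget driver,
	 * which may queue a new request and re-enter handle_ep(); the
	 * in_handle_ep flag breaks that recursion.
	 */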
2077 	if (ep->in_handle_ep)
2078 		goto recursion_detected;
2079 	ep->in_handle_ep = 1;
2080 
2081 	do {
2082 		completed = 0;
2083 		udccsr = udc_ep_readl(ep, UDCCSR);
2084 
2085 		if (likely(!list_empty(&ep->queue)))
2086 			req = list_entry(ep->queue.next,
2087 					struct pxa27x_request, queue);
2088 		else
2089 			req = NULL;
2090 
2091 		ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n",
2092 				req, udccsr, loop++);
2093 
2094 		if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN)))
2095 			udc_ep_writel(ep, UDCCSR,
2096 					udccsr & (UDCCSR_SST | UDCCSR_TRN));
2097 		if (!req)
2098 			break;
2099 
2100 		if (unlikely(is_in)) {
2101 			if (likely(!ep_is_full(ep)))
2102 				completed = write_fifo(ep, req);
2103 		} else {
2104 			if (likely(epout_has_pkt(ep)))
2105 				completed = read_fifo(ep, req);
2106 		}
2107 
2108 		if (completed) {
2109 			if (is_in)
2110 				ep_end_in_req(ep, req, &flags);
2111 			else
2112 				ep_end_out_req(ep, req, &flags);
2113 		}
2114 	} while (completed);
2115 
2116 	ep->in_handle_ep = 0;
2117 recursion_detected:
2118 	spin_unlock_irqrestore(&ep->lock, flags);
2119 }
2120 
2121 /**
2122  * pxa27x_change_configuration - Handle SET_CONF usb request notification
2123  * @udc: udc device
2124  * @config: usb configuration
2125  *
 * Posts the request to the upper level.
 * Doesn't use any pxa specific hardware configuration capability.
2128  */
2129 static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
2130 {
	struct usb_ctrlrequest req;
2132 
2133 	dev_dbg(udc->dev, "config=%d\n", config);
2134 
2135 	udc->config = config;
2136 	udc->last_interface = 0;
2137 	udc->last_alternate = 0;
2138 
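	/*
	 * SET_CONFIGURATION was handled by the hardware itself (see the
	 * handle_ep0() notes); rebuild the request so that the gadget
	 * driver still sees it.
	 */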
2139 	req.bRequestType = 0;
2140 	req.bRequest = USB_REQ_SET_CONFIGURATION;
2141 	req.wValue = config;
2142 	req.wIndex = 0;
2143 	req.wLength = 0;
2144 
2145 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2146 	udc->driver->setup(&udc->gadget, &req);
2147 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2148 }
2149 
2150 /**
2151  * pxa27x_change_interface - Handle SET_INTERF usb request notification
2152  * @udc: udc device
2153  * @iface: interface number
2154  * @alt: alternate setting number
2155  *
 * Posts the request to the upper level.
 * Doesn't use any pxa specific hardware configuration capability.
2158  */
2159 static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
2160 {
2161 	struct usb_ctrlrequest  req;
2162 
2163 	dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt);
2164 
2165 	udc->last_interface = iface;
2166 	udc->last_alternate = alt;
2167 
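	/*
	 * As for SET_CONFIGURATION, the hardware handled SET_INTERFACE
	 * itself; forward a rebuilt request to the gadget driver.
	 */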
2168 	req.bRequestType = USB_RECIP_INTERFACE;
2169 	req.bRequest = USB_REQ_SET_INTERFACE;
2170 	req.wValue = alt;
2171 	req.wIndex = iface;
2172 	req.wLength = 0;
2173 
2174 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2175 	udc->driver->setup(&udc->gadget, &req);
2176 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2177 }
2178 
2179 /*
2180  * irq_handle_data - Handle data transfer
 * @irq: IRQ number
 * @udc: pxa_udc device structure
 *
 * Called from the irq handler; transfers data between endpoint fifos and
 * request queues.
2185  */
2186 static void irq_handle_data(int irq, struct pxa_udc *udc)
2187 {
2188 	int i;
2189 	struct pxa_ep *ep;
2190 	u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK;
2191 	u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK;
2192 
2193 	if (udcisr0 & UDCISR_INT_MASK) {
2194 		udc->pxa_ep[0].stats.irqs++;
2195 		udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK));
2196 		handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR),
2197 				!!(udcisr0 & UDCICR_PKTCOMPL));
2198 	}
2199 
2200 	udcisr0 >>= 2;
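	/*
	 * ep0's two status bits were consumed above; every other endpoint
	 * owns two bits (fifo error, packet complete) in UDCISR0/UDCISR1.
	 */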
2201 	for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) {
2202 		if (!(udcisr0 & UDCISR_INT_MASK))
2203 			continue;
2204 
2205 		udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
2206 
2207 		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2208 		if (i < ARRAY_SIZE(udc->pxa_ep)) {
2209 			ep = &udc->pxa_ep[i];
2210 			ep->stats.irqs++;
2211 			handle_ep(ep);
2212 		}
2213 	}
2214 
2215 	for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
2216 		udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK));
2217 		if (!(udcisr1 & UDCISR_INT_MASK))
2218 			continue;
2219 
2220 		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2221 		if (i < ARRAY_SIZE(udc->pxa_ep)) {
2222 			ep = &udc->pxa_ep[i];
2223 			ep->stats.irqs++;
2224 			handle_ep(ep);
2225 		}
2226 	}
2227 
2228 }
2229 
2230 /**
2231  * irq_udc_suspend - Handle IRQ "UDC Suspend"
2232  * @udc: udc device
2233  */
2234 static void irq_udc_suspend(struct pxa_udc *udc)
2235 {
2236 	udc_writel(udc, UDCISR1, UDCISR1_IRSU);
2237 	udc->stats.irqs_suspend++;
2238 
2239 	if (udc->gadget.speed != USB_SPEED_UNKNOWN
2240 			&& udc->driver && udc->driver->suspend)
2241 		udc->driver->suspend(&udc->gadget);
2242 	ep0_idle(udc);
2243 }
2244 
2245 /**
 * irq_udc_resume - Handle IRQ "UDC Resume"
 * @udc: udc device
 */
2249 static void irq_udc_resume(struct pxa_udc *udc)
2250 {
2251 	udc_writel(udc, UDCISR1, UDCISR1_IRRU);
2252 	udc->stats.irqs_resume++;
2253 
2254 	if (udc->gadget.speed != USB_SPEED_UNKNOWN
2255 			&& udc->driver && udc->driver->resume)
2256 		udc->driver->resume(&udc->gadget);
2257 }
2258 
2259 /**
2260  * irq_udc_reconfig - Handle IRQ "UDC Change Configuration"
2261  * @udc: udc device
2262  */
2263 static void irq_udc_reconfig(struct pxa_udc *udc)
2264 {
2265 	unsigned config, interface, alternate, config_change;
2266 	u32 udccr = udc_readl(udc, UDCCR);
2267 
2268 	udc_writel(udc, UDCISR1, UDCISR1_IRCC);
2269 	udc->stats.irqs_reconfig++;
2270 
2271 	config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S;
2272 	config_change = (config != udc->config);
2273 	pxa27x_change_configuration(udc, config);
2274 
2275 	interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S;
2276 	alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S;
2277 	pxa27x_change_interface(udc, interface, alternate);
2278 
2279 	if (config_change)
2280 		update_pxa_ep_matches(udc);
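	/*
	 * Acknowledge the event: setting SMAC makes the controller switch
	 * its endpoint memory to the newly active configuration.
	 */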
2281 	udc_set_mask_UDCCR(udc, UDCCR_SMAC);
2282 }
2283 
2284 /**
2285  * irq_udc_reset - Handle IRQ "UDC Reset"
2286  * @udc: udc device
2287  */
2288 static void irq_udc_reset(struct pxa_udc *udc)
2289 {
2290 	u32 udccr = udc_readl(udc, UDCCR);
2291 	struct pxa_ep *ep = &udc->pxa_ep[0];
2292 
2293 	dev_info(udc->dev, "USB reset\n");
2294 	udc_writel(udc, UDCISR1, UDCISR1_IRRS);
2295 	udc->stats.irqs_reset++;
2296 
2297 	if ((udccr & UDCCR_UDA) == 0) {
2298 		dev_dbg(udc->dev, "USB reset start\n");
2299 		stop_activity(udc, udc->driver);
2300 	}
2301 	udc->gadget.speed = USB_SPEED_FULL;
2302 	memset(&udc->stats, 0, sizeof udc->stats);
2303 
2304 	nuke(ep, -EPROTO);
2305 	ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
2306 	ep0_idle(udc);
2307 }
2308 
2309 /**
2310  * pxa_udc_irq - Main irq handler
2311  * @irq: irq number
2312  * @_dev: udc device
2313  *
2314  * Handles all udc interrupts
2315  */
2316 static irqreturn_t pxa_udc_irq(int irq, void *_dev)
2317 {
2318 	struct pxa_udc *udc = _dev;
2319 	u32 udcisr0 = udc_readl(udc, UDCISR0);
2320 	u32 udcisr1 = udc_readl(udc, UDCISR1);
2321 	u32 udccr = udc_readl(udc, UDCCR);
2322 	u32 udcisr1_spec;
2323 
2324 	dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, "
2325 		 "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr);
2326 
2327 	udcisr1_spec = udcisr1 & 0xf8000000;
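	/*
	 * The top bits of UDCISR1 carry the non-endpoint events handled
	 * below: configuration change, suspend, resume and reset.
	 */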
2328 	if (unlikely(udcisr1_spec & UDCISR1_IRSU))
2329 		irq_udc_suspend(udc);
2330 	if (unlikely(udcisr1_spec & UDCISR1_IRRU))
2331 		irq_udc_resume(udc);
2332 	if (unlikely(udcisr1_spec & UDCISR1_IRCC))
2333 		irq_udc_reconfig(udc);
2334 	if (unlikely(udcisr1_spec & UDCISR1_IRRS))
2335 		irq_udc_reset(udc);
2336 
2337 	if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK))
2338 		irq_handle_data(irq, udc);
2339 
2340 	return IRQ_HANDLED;
2341 }
2342 
2343 static struct pxa_udc memory = {
2344 	.gadget = {
2345 		.ops		= &pxa_udc_ops,
2346 		.ep0		= &memory.udc_usb_ep[0].usb_ep,
2347 		.name		= driver_name,
2348 		.dev = {
2349 			.init_name	= "gadget",
2350 		},
2351 	},
2352 
2353 	.udc_usb_ep = {
2354 		USB_EP_CTRL,
2355 		USB_EP_OUT_BULK(1),
2356 		USB_EP_IN_BULK(2),
2357 		USB_EP_IN_ISO(3),
2358 		USB_EP_OUT_ISO(4),
2359 		USB_EP_IN_INT(5),
2360 	},
2361 
2362 	.pxa_ep = {
2363 		PXA_EP_CTRL,
2364 		/* Endpoints for gadget zero */
2365 		PXA_EP_OUT_BULK(1, 1, 3, 0, 0),
2366 		PXA_EP_IN_BULK(2,  2, 3, 0, 0),
2367 		/* Endpoints for ether gadget, file storage gadget */
2368 		PXA_EP_OUT_BULK(3, 1, 1, 0, 0),
2369 		PXA_EP_IN_BULK(4,  2, 1, 0, 0),
2370 		PXA_EP_IN_ISO(5,   3, 1, 0, 0),
2371 		PXA_EP_OUT_ISO(6,  4, 1, 0, 0),
2372 		PXA_EP_IN_INT(7,   5, 1, 0, 0),
2373 		/* Endpoints for RNDIS, serial */
2374 		PXA_EP_OUT_BULK(8, 1, 2, 0, 0),
2375 		PXA_EP_IN_BULK(9,  2, 2, 0, 0),
2376 		PXA_EP_IN_INT(10,  5, 2, 0, 0),
		/*
		 * All the following endpoints are only here for completeness.
		 * They will never work, as multiple interfaces are really
		 * broken on the pxa.
		 */
2382 		PXA_EP_OUT_BULK(11, 1, 2, 1, 0),
2383 		PXA_EP_IN_BULK(12,  2, 2, 1, 0),
		/* Endpoints for CDC Ether */
2385 		PXA_EP_OUT_BULK(13, 1, 1, 1, 1),
2386 		PXA_EP_IN_BULK(14,  2, 1, 1, 1),
2387 	}
2388 };
2389 
2390 #if defined(CONFIG_OF)
2391 static const struct of_device_id udc_pxa_dt_ids[] = {
2392 	{ .compatible = "marvell,pxa270-udc" },
2393 	{}
2394 };
2395 MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
2396 #endif
2397 
2398 /**
2399  * pxa_udc_probe - probes the udc device
 * @pdev: platform device
2401  *
2402  * Perform basic init : allocates udc clock, creates sysfs files, requests
2403  * irq.
2404  */
2405 static int pxa_udc_probe(struct platform_device *pdev)
2406 {
2407 	struct resource *regs;
2408 	struct pxa_udc *udc = &memory;
2409 	int retval = 0, gpio;
2410 	struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
2411 	unsigned long gpio_flags;
2412 
2413 	if (mach) {
2414 		gpio_flags = mach->gpio_pullup_inverted ? GPIOF_ACTIVE_LOW : 0;
2415 		gpio = mach->gpio_pullup;
2416 		if (gpio_is_valid(gpio)) {
2417 			retval = devm_gpio_request_one(&pdev->dev, gpio,
2418 						       gpio_flags,
2419 						       "USB D+ pullup");
2420 			if (retval)
2421 				return retval;
2422 			udc->gpiod = gpio_to_desc(mach->gpio_pullup);
2423 		}
2424 		udc->udc_command = mach->udc_command;
2425 	} else {
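		/* no platform data: gpiolib (e.g. device tree) provides the D+ pullup */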
2426 		udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
2427 	}
2428 
2429 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2430 	udc->regs = devm_ioremap_resource(&pdev->dev, regs);
2431 	if (IS_ERR(udc->regs))
2432 		return PTR_ERR(udc->regs);
2433 	udc->irq = platform_get_irq(pdev, 0);
2434 	if (udc->irq < 0)
2435 		return udc->irq;
2436 
2437 	udc->dev = &pdev->dev;
2438 	udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
2439 
2440 	if (IS_ERR(udc->gpiod)) {
2441 		dev_err(&pdev->dev, "Couldn't find or request D+ gpio : %ld\n",
2442 			PTR_ERR(udc->gpiod));
2443 		return PTR_ERR(udc->gpiod);
2444 	}
2445 	if (udc->gpiod)
2446 		gpiod_direction_output(udc->gpiod, 0);
2447 
2448 	udc->clk = devm_clk_get(&pdev->dev, NULL);
2449 	if (IS_ERR(udc->clk))
2450 		return PTR_ERR(udc->clk);
2451 
2452 	retval = clk_prepare(udc->clk);
2453 	if (retval)
2454 		return retval;
2455 
2456 	udc->vbus_sensed = 0;
2457 
2458 	the_controller = udc;
2459 	platform_set_drvdata(pdev, udc);
2460 	udc_init_data(udc);
2461 
2462 	/* irq setup after old hardware state is cleaned up */
2463 	retval = devm_request_irq(&pdev->dev, udc->irq, pxa_udc_irq,
2464 				  IRQF_SHARED, driver_name, udc);
2465 	if (retval != 0) {
2466 		dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
2467 			driver_name, udc->irq, retval);
2468 		goto err;
2469 	}
2470 
2471 	retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
2472 	if (retval)
2473 		goto err;
2474 
2475 	pxa_init_debugfs(udc);
2476 	if (should_enable_udc(udc))
2477 		udc_enable(udc);
2478 	return 0;
2479 err:
2480 	clk_unprepare(udc->clk);
2481 	return retval;
2482 }
2483 
2484 /**
2485  * pxa_udc_remove - removes the udc device driver
2486  * @_dev: platform device
2487  */
2488 static int pxa_udc_remove(struct platform_device *_dev)
2489 {
2490 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2491 
2492 	usb_del_gadget_udc(&udc->gadget);
2493 	pxa_cleanup_debugfs(udc);
2494 
2495 	usb_put_phy(udc->transceiver);
2496 
2497 	udc->transceiver = NULL;
2498 	the_controller = NULL;
2499 	clk_unprepare(udc->clk);
2500 
2501 	return 0;
2502 }
2503 
2504 static void pxa_udc_shutdown(struct platform_device *_dev)
2505 {
2506 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2507 
2508 	if (udc_readl(udc, UDCCR) & UDCCR_UDE)
2509 		udc_disable(udc);
2510 }
2511 
2512 #ifdef CONFIG_PXA27x
2513 extern void pxa27x_clear_otgph(void);
2514 #else
2515 #define pxa27x_clear_otgph()   do {} while (0)
2516 #endif
2517 
2518 #ifdef CONFIG_PM
2519 /**
2520  * pxa_udc_suspend - Suspend udc device
2521  * @_dev: platform device
2522  * @state: suspend state
2523  *
 * Suspends the udc : saves the ep0 control register (UDCCSR0), disables the
 * udc device and drops the D+ pullup.
2526  */
2527 static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
2528 {
2529 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2530 	struct pxa_ep *ep;
2531 
2532 	ep = &udc->pxa_ep[0];
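	/* save the ep0 control/status register so that resume can restore it */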
2533 	udc->udccsr0 = udc_ep_readl(ep, UDCCSR);
2534 
2535 	udc_disable(udc);
2536 	udc->pullup_resume = udc->pullup_on;
2537 	dplus_pullup(udc, 0);
2538 
2539 	return 0;
2540 }
2541 
2542 /**
2543  * pxa_udc_resume - Resume udc device
2544  * @_dev: platform device
2545  *
 * Resumes the udc : restores the ep0 control register (UDCCSR0), restores the
 * D+ pullup state, then re-enables the udc device if needed.
2548  */
2549 static int pxa_udc_resume(struct platform_device *_dev)
2550 {
2551 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2552 	struct pxa_ep *ep;
2553 
2554 	ep = &udc->pxa_ep[0];
2555 	udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));
2556 
2557 	dplus_pullup(udc, udc->pullup_resume);
2558 	if (should_enable_udc(udc))
2559 		udc_enable(udc);
2560 	/*
2561 	 * We do not handle OTG yet.
2562 	 *
2563 	 * OTGPH bit is set when sleep mode is entered.
2564 	 * it indicates that OTG pad is retaining its state.
2565 	 * Upon exit from sleep mode and before clearing OTGPH,
2566 	 * Software must configure the USB OTG pad, UDC, and UHC
2567 	 * to the state they were in before entering sleep mode.
2568 	 */
2569 	pxa27x_clear_otgph();
2570 
2571 	return 0;
2572 }
2573 #endif
2574 
2575 /* work with hotplug and coldplug */
2576 MODULE_ALIAS("platform:pxa27x-udc");
2577 
2578 static struct platform_driver udc_driver = {
2579 	.driver		= {
2580 		.name	= "pxa27x-udc",
2581 		.of_match_table = of_match_ptr(udc_pxa_dt_ids),
2582 	},
2583 	.probe		= pxa_udc_probe,
2584 	.remove		= pxa_udc_remove,
2585 	.shutdown	= pxa_udc_shutdown,
2586 #ifdef CONFIG_PM
2587 	.suspend	= pxa_udc_suspend,
2588 	.resume		= pxa_udc_resume
2589 #endif
2590 };
2591 
2592 module_platform_driver(udc_driver);
2593 
2594 MODULE_DESCRIPTION(DRIVER_DESC);
2595 MODULE_AUTHOR("Robert Jarzmik");
2596 MODULE_LICENSE("GPL");
2597