xref: /linux/drivers/usb/gadget/udc/pxa27x_udc.c (revision 93df8a1ed6231727c5db94a80b1a6bd5ee67cec3)
1 /*
2  * Handles the Intel 27x USB Device Controller (UDC)
3  *
4  * Inspired by original driver by Frank Becker, David Brownell, and others.
5  * Copyright (C) 2008 Robert Jarzmik
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/errno.h>
16 #include <linux/err.h>
17 #include <linux/platform_device.h>
18 #include <linux/delay.h>
19 #include <linux/list.h>
20 #include <linux/interrupt.h>
21 #include <linux/proc_fs.h>
22 #include <linux/clk.h>
23 #include <linux/irq.h>
24 #include <linux/gpio.h>
25 #include <linux/gpio/consumer.h>
26 #include <linux/slab.h>
27 #include <linux/prefetch.h>
28 #include <linux/byteorder/generic.h>
29 #include <linux/platform_data/pxa2xx_udc.h>
30 #include <linux/of_device.h>
31 #include <linux/of_gpio.h>
32 
33 #include <linux/usb.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 
37 #include "pxa27x_udc.h"
38 
39 /*
40  * This driver handles the USB Device Controller (UDC) in Intel's PXA 27x
41  * series processors.
42  *
43  * Such controller drivers work with a gadget driver.  The gadget driver
44  * returns descriptors, implements configuration and data protocols used
45  * by the host to interact with this device, and allocates endpoints to
46  * the different protocol interfaces.  The controller driver virtualizes
47  * usb hardware so that the gadget drivers will be more portable.
48  *
49  * This UDC hardware wants to implement a bit too much USB protocol. The
50  * biggest issues are:  that the endpoints have to be set up before the
51  * controller can be enabled (minor, and not uncommon); and each endpoint
52  * can only have one configuration, interface and alternative interface
53  * number (major, and very unusual). Once set up, these cannot be changed
54  * without a controller reset.
55  *
56  * The workaround is to set up all combinations necessary for the gadgets which
57  * will work with this driver. This is done statically in the pxa_udc structure.
58  * See pxa_udc, udc_usb_ep versus pxa_ep, and matching function find_pxa_ep.
59  * (You could modify this if needed.  Some drivers have a "fifo_mode" module
60  * parameter to facilitate such changes.)
61  *
62  * The combinations have been tested with these gadgets:
63  *  - zero gadget
64  *  - file storage gadget
65  *  - ether gadget
66  *
67  * The driver doesn't use DMA, only IO access and IRQ callbacks. No use is
68  * made of UDC's double buffering either. USB "On-The-Go" is not implemented.
69  *
70  * All the requests are handled the same way:
71  *  - the driver first tries to handle the request directly through fifo IO
72  *  - if the IO fifo is not big enough, the remainder is sent/received from
73  *    the interrupt handler.
74  */
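
/*
 * Illustration only, not part of this driver: a minimal sketch of how a
 * gadget driver reaches the request path described above.  Here "ep" is
 * assumed to be an already enabled endpoint and my_complete() a hypothetical
 * completion callback; usb_ep_alloc_request() lands in pxa_ep_alloc_request()
 * below, and usb_ep_queue() in pxa_ep_queue(), which does the direct fifo IO
 * and leaves the remainder to the IRQ handler.
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */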
75 
76 #define	DRIVER_VERSION	"2008-04-18"
77 #define	DRIVER_DESC	"PXA 27x USB Device Controller driver"
78 
79 static const char driver_name[] = "pxa27x_udc";
80 static struct pxa_udc *the_controller;
81 
82 static void handle_ep(struct pxa_ep *ep);
83 
84 /*
85  * Debug filesystem
86  */
87 #ifdef CONFIG_USB_GADGET_DEBUG_FS
88 
89 #include <linux/debugfs.h>
90 #include <linux/uaccess.h>
91 #include <linux/seq_file.h>
92 
93 static int state_dbg_show(struct seq_file *s, void *p)
94 {
95 	struct pxa_udc *udc = s->private;
96 	u32 tmp;
97 
98 	if (!udc->driver)
99 		return -ENODEV;
100 
101 	/* basic device status */
102 	seq_printf(s, DRIVER_DESC "\n"
103 		   "%s version: %s\n"
104 		   "Gadget driver: %s\n",
105 		   driver_name, DRIVER_VERSION,
106 		   udc->driver ? udc->driver->driver.name : "(none)");
107 
108 	tmp = udc_readl(udc, UDCCR);
109 	seq_printf(s,
110 		   "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), con=%d,inter=%d,altinter=%d\n",
111 		   tmp,
112 		   (tmp & UDCCR_OEN) ? " oen":"",
113 		   (tmp & UDCCR_AALTHNP) ? " aalthnp":"",
114 		   (tmp & UDCCR_AHNP) ? " rem" : "",
115 		   (tmp & UDCCR_BHNP) ? " rstir" : "",
116 		   (tmp & UDCCR_DWRE) ? " dwre" : "",
117 		   (tmp & UDCCR_SMAC) ? " smac" : "",
118 		   (tmp & UDCCR_EMCE) ? " emce" : "",
119 		   (tmp & UDCCR_UDR) ? " udr" : "",
120 		   (tmp & UDCCR_UDA) ? " uda" : "",
121 		   (tmp & UDCCR_UDE) ? " ude" : "",
122 		   (tmp & UDCCR_ACN) >> UDCCR_ACN_S,
123 		   (tmp & UDCCR_AIN) >> UDCCR_AIN_S,
124 		   (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S);
125 	/* registers for device and ep0 */
126 	seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n",
127 		   udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1));
128 	seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n",
129 		   udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1));
130 	seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR));
131 	seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, reconfig=%lu\n",
132 		   udc->stats.irqs_reset, udc->stats.irqs_suspend,
133 		   udc->stats.irqs_resume, udc->stats.irqs_reconfig);
134 
135 	return 0;
136 }
137 
138 static int queues_dbg_show(struct seq_file *s, void *p)
139 {
140 	struct pxa_udc *udc = s->private;
141 	struct pxa_ep *ep;
142 	struct pxa27x_request *req;
143 	int i, maxpkt;
144 
145 	if (!udc->driver)
146 		return -ENODEV;
147 
148 	/* dump endpoint queues */
149 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
150 		ep = &udc->pxa_ep[i];
151 		maxpkt = ep->fifo_size;
152 		seq_printf(s,  "%-12s max_pkt=%d %s\n",
153 			   EPNAME(ep), maxpkt, "pio");
154 
155 		if (list_empty(&ep->queue)) {
156 			seq_puts(s, "\t(nothing queued)\n");
157 			continue;
158 		}
159 
160 		list_for_each_entry(req, &ep->queue, queue) {
161 			seq_printf(s,  "\treq %p len %d/%d buf %p\n",
162 				   &req->req, req->req.actual,
163 				   req->req.length, req->req.buf);
164 		}
165 	}
166 
167 	return 0;
168 }
169 
170 static int eps_dbg_show(struct seq_file *s, void *p)
171 {
172 	struct pxa_udc *udc = s->private;
173 	struct pxa_ep *ep;
174 	int i;
175 	u32 tmp;
176 
177 	if (!udc->driver)
178 		return -ENODEV;
179 
180 	ep = &udc->pxa_ep[0];
181 	tmp = udc_ep_readl(ep, UDCCSR);
182 	seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n",
183 		   tmp,
184 		   (tmp & UDCCSR0_SA) ? " sa" : "",
185 		   (tmp & UDCCSR0_RNE) ? " rne" : "",
186 		   (tmp & UDCCSR0_FST) ? " fst" : "",
187 		   (tmp & UDCCSR0_SST) ? " sst" : "",
188 		   (tmp & UDCCSR0_DME) ? " dme" : "",
189 		   (tmp & UDCCSR0_IPR) ? " ipr" : "",
190 		   (tmp & UDCCSR0_OPC) ? " opc" : "");
191 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
192 		ep = &udc->pxa_ep[i];
193 		tmp = i? udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR);
194 		seq_printf(s, "%-12s: IN %lu(%lu reqs), OUT %lu(%lu reqs), irqs=%lu, udccr=0x%08x, udccsr=0x%03x, udcbcr=%d\n",
195 			   EPNAME(ep),
196 			   ep->stats.in_bytes, ep->stats.in_ops,
197 			   ep->stats.out_bytes, ep->stats.out_ops,
198 			   ep->stats.irqs,
199 			   tmp, udc_ep_readl(ep, UDCCSR),
200 			   udc_ep_readl(ep, UDCBCR));
201 	}
202 
203 	return 0;
204 }
205 
206 static int eps_dbg_open(struct inode *inode, struct file *file)
207 {
208 	return single_open(file, eps_dbg_show, inode->i_private);
209 }
210 
211 static int queues_dbg_open(struct inode *inode, struct file *file)
212 {
213 	return single_open(file, queues_dbg_show, inode->i_private);
214 }
215 
216 static int state_dbg_open(struct inode *inode, struct file *file)
217 {
218 	return single_open(file, state_dbg_show, inode->i_private);
219 }
220 
221 static const struct file_operations state_dbg_fops = {
222 	.owner		= THIS_MODULE,
223 	.open		= state_dbg_open,
224 	.llseek		= seq_lseek,
225 	.read		= seq_read,
226 	.release	= single_release,
227 };
228 
229 static const struct file_operations queues_dbg_fops = {
230 	.owner		= THIS_MODULE,
231 	.open		= queues_dbg_open,
232 	.llseek		= seq_lseek,
233 	.read		= seq_read,
234 	.release	= single_release,
235 };
236 
237 static const struct file_operations eps_dbg_fops = {
238 	.owner		= THIS_MODULE,
239 	.open		= eps_dbg_open,
240 	.llseek		= seq_lseek,
241 	.read		= seq_read,
242 	.release	= single_release,
243 };
244 
245 static void pxa_init_debugfs(struct pxa_udc *udc)
246 {
247 	struct dentry *root, *state, *queues, *eps;
248 
249 	root = debugfs_create_dir(udc->gadget.name, NULL);
250 	if (IS_ERR(root) || !root)
251 		goto err_root;
252 
253 	state = debugfs_create_file("udcstate", 0400, root, udc,
254 			&state_dbg_fops);
255 	if (!state)
256 		goto err_state;
257 	queues = debugfs_create_file("queues", 0400, root, udc,
258 			&queues_dbg_fops);
259 	if (!queues)
260 		goto err_queues;
261 	eps = debugfs_create_file("epstate", 0400, root, udc,
262 			&eps_dbg_fops);
263 	if (!eps)
264 		goto err_eps;
265 
266 	udc->debugfs_root = root;
267 	udc->debugfs_state = state;
268 	udc->debugfs_queues = queues;
269 	udc->debugfs_eps = eps;
270 	return;
271 err_eps:
272 	debugfs_remove(eps);
273 err_queues:
274 	debugfs_remove(queues);
275 err_state:
276 	debugfs_remove(root);
277 err_root:
278 	dev_err(udc->dev, "debugfs is not available\n");
279 }
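
/*
 * Usage note (an assumption about a typical setup, not dictated by this
 * driver): with CONFIG_USB_GADGET_DEBUG_FS set and debugfs mounted in the
 * usual place, the three files created above appear as
 * /sys/kernel/debug/<gadget name>/{udcstate,queues,epstate}; the exact path
 * depends on where debugfs is mounted on the system.
 */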
280 
281 static void pxa_cleanup_debugfs(struct pxa_udc *udc)
282 {
283 	debugfs_remove(udc->debugfs_eps);
284 	debugfs_remove(udc->debugfs_queues);
285 	debugfs_remove(udc->debugfs_state);
286 	debugfs_remove(udc->debugfs_root);
287 	udc->debugfs_eps = NULL;
288 	udc->debugfs_queues = NULL;
289 	udc->debugfs_state = NULL;
290 	udc->debugfs_root = NULL;
291 }
292 
293 #else
294 static inline void pxa_init_debugfs(struct pxa_udc *udc)
295 {
296 }
297 
298 static inline void pxa_cleanup_debugfs(struct pxa_udc *udc)
299 {
300 }
301 #endif
302 
303 /**
304  * is_match_usb_pxa - check if usb_ep and pxa_ep match
305  * @udc_usb_ep: usb endpoint
306  * @ep: pxa endpoint
307  * @config: configuration required in pxa_ep
308  * @interface: interface required in pxa_ep
309  * @altsetting: altsetting required in pxa_ep
310  *
311  * Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise
312  */
313 static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep,
314 		int config, int interface, int altsetting)
315 {
316 	if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr)
317 		return 0;
318 	if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in)
319 		return 0;
320 	if (usb_endpoint_type(&udc_usb_ep->desc) != ep->type)
321 		return 0;
322 	if ((ep->config != config) || (ep->interface != interface)
323 			|| (ep->alternate != altsetting))
324 		return 0;
325 	return 1;
326 }
327 
328 /**
329  * find_pxa_ep - find pxa_ep structure matching udc_usb_ep
330  * @udc: pxa udc
331  * @udc_usb_ep: udc_usb_ep structure
332  *
333  * Match udc_usb_ep against all available pxa_ep, to see if one matches.
334  * This is necessary because of the strong pxa hardware restriction requiring
335  * that once pxa endpoints are initialized, their configuration is frozen, and
336  * no change can be made to their address, direction, or to the configuration,
337  * interface or altsetting in which they are active ... which differs from more
338  * usual models where endpoints are roughly just addressable fifos, and
339  * configuration events (like all control messages) are left up to gadget drivers.
340  *
341  * Note that there is still a blurred point here:
342  *   - we rely on the UDCCR register's "active interface" and "active altsetting".
343  *     This makes no sense with regard to the USB spec, where multiple interfaces
344  *     are active at the same time.
345  *   - if we knew for sure that the pxa can handle multiple interfaces at the
346  *     same time, assuming Intel's Developer Guide is wrong, this function
347  *     should be reviewed, and a cache of (iface, altsetting) pairs should
348  *     be kept in the pxa_udc structure. In that case this function would match
349  *     against the cache of pairs instead of the "last altsetting" set up.
350  *
351  * Returns the matched pxa_ep structure or NULL if none found
352  */
353 static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc,
354 		struct udc_usb_ep *udc_usb_ep)
355 {
356 	int i;
357 	struct pxa_ep *ep;
358 	int cfg = udc->config;
359 	int iface = udc->last_interface;
360 	int alt = udc->last_alternate;
361 
362 	if (udc_usb_ep == &udc->udc_usb_ep[0])
363 		return &udc->pxa_ep[0];
364 
365 	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
366 		ep = &udc->pxa_ep[i];
367 		if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt))
368 			return ep;
369 	}
370 	return NULL;
371 }
372 
373 /**
374  * update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep
375  * @udc: pxa udc
376  *
377  * Context: in_interrupt()
378  *
379  * Updates all pxa_ep fields in udc_usb_ep structures, if this field was
380  * previously set up (and is not NULL). The update is necessary if a
381  * configuration change or altsetting change was issued by the USB host.
382  */
383 static void update_pxa_ep_matches(struct pxa_udc *udc)
384 {
385 	int i;
386 	struct udc_usb_ep *udc_usb_ep;
387 
388 	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
389 		udc_usb_ep = &udc->udc_usb_ep[i];
390 		if (udc_usb_ep->pxa_ep)
391 			udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep);
392 	}
393 }
394 
395 /**
396  * pio_irq_enable - Enables irq generation for one endpoint
397  * @ep: udc endpoint
398  */
399 static void pio_irq_enable(struct pxa_ep *ep)
400 {
401 	struct pxa_udc *udc = ep->dev;
402 	int index = EPIDX(ep);
403 	u32 udcicr0 = udc_readl(udc, UDCICR0);
404 	u32 udcicr1 = udc_readl(udc, UDCICR1);
405 
406 	if (index < 16)
407 		udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2)));
408 	else
409 		udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2)));
410 }
411 
412 /**
413  * pio_irq_disable - Disables irq generation for one endpoint
414  * @ep: udc endpoint
415  */
416 static void pio_irq_disable(struct pxa_ep *ep)
417 {
418 	struct pxa_udc *udc = ep->dev;
419 	int index = EPIDX(ep);
420 	u32 udcicr0 = udc_readl(udc, UDCICR0);
421 	u32 udcicr1 = udc_readl(udc, UDCICR1);
422 
423 	if (index < 16)
424 		udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2)));
425 	else
426 		udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2)));
427 }
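
/*
 * Worked example of the interrupt mask layout used above: each endpoint owns
 * two consecutive enable bits in the interrupt control registers, so endpoint
 * index 5 maps to UDCICR0 bits 11:10 (mask 3 << 10), while endpoint index 20
 * maps to UDCICR1 bits 9:8 (mask 3 << 8).
 */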
428 
429 /**
430  * udc_set_mask_UDCCR - set bits in UDCCR
431  * @udc: udc device
432  * @mask: bits to set in UDCCR
433  *
434  * Sets bits in UDCCR, leaving DME and FST bits as they were.
435  */
436 static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask)
437 {
438 	u32 udccr = udc_readl(udc, UDCCR);
439 	udc_writel(udc, UDCCR,
440 			(udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
441 }
442 
443 /**
444  * udc_clear_mask_UDCCR - clears bits in UDCCR
445  * @udc: udc device
446  * @mask: bit to clear in UDCCR
447  *
448  * Clears bits in UDCCR, leaving DME and FST bits as they were.
449  */
450 static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
451 {
452 	u32 udccr = udc_readl(udc, UDCCR);
453 	udc_writel(udc, UDCCR,
454 			(udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
455 }
456 
457 /**
458  * ep_write_UDCCSR - set bits in UDCCSR
459  * @ep: udc endpoint
460  * @mask: bits to set in UDCCSR
461  *
462  * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
463  *
464  * A special case applies to ep0: the ACM bit is always set to 1, as needed for
465  * SET_INTERFACE and SET_CONFIGURATION.
466  */
467 static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
468 {
469 	if (is_ep0(ep))
470 		mask |= UDCCSR0_ACM;
471 	udc_ep_writel(ep, UDCCSR, mask);
472 }
473 
474 /**
475  * ep_count_bytes_remain - get how many bytes in udc endpoint
476  * @ep: udc endpoint
477  *
478  * Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP)
479  */
480 static int ep_count_bytes_remain(struct pxa_ep *ep)
481 {
482 	if (ep->dir_in)
483 		return -EOPNOTSUPP;
484 	return udc_ep_readl(ep, UDCBCR) & 0x3ff;
485 }
486 
487 /**
488  * ep_is_empty - checks if ep has bytes ready for reading
489  * @ep: udc endpoint
490  *
491  * If endpoint is the control endpoint, checks if there are bytes in the
492  * control endpoint fifo. If endpoint is a data endpoint, checks if bytes
493  * are ready for reading on OUT endpoint.
494  *
495  * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint
496  */
497 static int ep_is_empty(struct pxa_ep *ep)
498 {
499 	int ret;
500 
501 	if (!is_ep0(ep) && ep->dir_in)
502 		return -EOPNOTSUPP;
503 	if (is_ep0(ep))
504 		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE);
505 	else
506 		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE);
507 	return ret;
508 }
509 
510 /**
511  * ep_is_full - checks if ep has room to write bytes
512  * @ep: udc endpoint
513  *
514  * If endpoint is not the control endpoint and is an IN endpoint, checks if
515  * there is room to write bytes into the endpoint.
516  *
517  * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint
518  */
519 static int ep_is_full(struct pxa_ep *ep)
520 {
521 	if (is_ep0(ep))
522 		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR);
523 	if (!ep->dir_in)
524 		return -EOPNOTSUPP;
525 	return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF));
526 }
527 
528 /**
529  * epout_has_pkt - checks if OUT endpoint fifo has a packet available
530  * @ep: pxa endpoint
531  *
532  * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep.
533  */
534 static int epout_has_pkt(struct pxa_ep *ep)
535 {
536 	if (!is_ep0(ep) && ep->dir_in)
537 		return -EOPNOTSUPP;
538 	if (is_ep0(ep))
539 		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC);
540 	return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC);
541 }
542 
543 /**
544  * set_ep0state - Set ep0 automata state
545  * @udc: udc device
546  * @state: new ep0 state
547  */
548 static void set_ep0state(struct pxa_udc *udc, int state)
549 {
550 	struct pxa_ep *ep = &udc->pxa_ep[0];
551 	char *old_stname = EP0_STNAME(udc);
552 
553 	udc->ep0state = state;
554 	ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname,
555 		EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR),
556 		udc_ep_readl(ep, UDCBCR));
557 }
558 
559 /**
560  * ep0_idle - Put control endpoint into idle state
561  * @dev: udc device
562  */
563 static void ep0_idle(struct pxa_udc *dev)
564 {
565 	set_ep0state(dev, WAIT_FOR_SETUP);
566 }
567 
568 /**
569  * inc_ep_stats_reqs - Update ep stats counts
570  * @ep: physical endpoint
572  * @is_in: ep direction (USB_DIR_IN or 0)
573  *
574  */
575 static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in)
576 {
577 	if (is_in)
578 		ep->stats.in_ops++;
579 	else
580 		ep->stats.out_ops++;
581 }
582 
583 /**
584  * inc_ep_stats_bytes - Update ep stats counts
585  * @ep: physical endpoint
586  * @count: bytes transferred on endpoint
587  * @is_in: ep direction (USB_DIR_IN or 0)
588  */
589 static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
590 {
591 	if (is_in)
592 		ep->stats.in_bytes += count;
593 	else
594 		ep->stats.out_bytes += count;
595 }
596 
597 /**
598  * pxa_ep_setup - Sets up a usb physical endpoint
599  * @ep: pxa27x physical endpoint
600  *
601  * Programs the physical pxa27x ep's configuration register (UDCCR)
602  */
603 static void pxa_ep_setup(struct pxa_ep *ep)
604 {
605 	u32 new_udccr;
606 
607 	new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN)
608 		| ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN)
609 		| ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN)
610 		| ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN)
611 		| ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET)
612 		| ((ep->dir_in) ? UDCCONR_ED : 0)
613 		| ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS)
614 		| UDCCONR_EE;
615 
616 	udc_ep_writel(ep, UDCCR, new_udccr);
617 }
618 
619 /**
620  * pxa_eps_setup - Sets up all usb physical endpoints
621  * @dev: udc device
622  *
623  * Setup all pxa physical endpoints, except ep0
624  */
625 static void pxa_eps_setup(struct pxa_udc *dev)
626 {
627 	unsigned int i;
628 
629 	dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev);
630 
631 	for (i = 1; i < NR_PXA_ENDPOINTS; i++)
632 		pxa_ep_setup(&dev->pxa_ep[i]);
633 }
634 
635 /**
636  * pxa_ep_alloc_request - Allocate usb request
637  * @_ep: usb endpoint
638  * @gfp_flags: GFP flags for the allocation
639  *
640  * For the pxa27x, these can just wrap kmalloc/kfree.  gadget drivers
641  * must still pass correctly initialized endpoints, since other controller
642  * drivers may care about how it's currently set up (dma issues etc).
643   */
644 static struct usb_request *
645 pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
646 {
647 	struct pxa27x_request *req;
648 
649 	req = kzalloc(sizeof *req, gfp_flags);
650 	if (!req)
651 		return NULL;
652 
653 	INIT_LIST_HEAD(&req->queue);
654 	req->in_use = 0;
655 	req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
656 
657 	return &req->req;
658 }
659 
660 /**
661  * pxa_ep_free_request - Free usb request
662  * @_ep: usb endpoint
663  * @_req: usb request
664  *
665  * Wrapper around kfree to free _req
666  */
667 static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
668 {
669 	struct pxa27x_request *req;
670 
671 	req = container_of(_req, struct pxa27x_request, req);
672 	WARN_ON(!list_empty(&req->queue));
673 	kfree(req);
674 }
675 
676 /**
677  * ep_add_request - add a request to the endpoint's queue
678  * @ep: usb endpoint
679  * @req: usb request
680  *
681  * Context: ep->lock held
682  *
683  * Queues the request in the endpoint's queue, and enables the interrupts
684  * on the endpoint.
685  */
686 static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req)
687 {
688 	if (unlikely(!req))
689 		return;
690 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
691 		req->req.length, udc_ep_readl(ep, UDCCSR));
692 
693 	req->in_use = 1;
694 	list_add_tail(&req->queue, &ep->queue);
695 	pio_irq_enable(ep);
696 }
697 
698 /**
699  * ep_del_request - removes a request from the endpoint's queue
700  * @ep: usb endpoint
701  * @req: usb request
702  *
703  * Context: ep->lock held
704  *
705  * Unqueue the request from the endpoint's queue. If there are no more requests
706  * on the endpoint, and if it's not the control endpoint, interrupts are
707  * disabled on the endpoint.
708  */
709 static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
710 {
711 	if (unlikely(!req))
712 		return;
713 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
714 		req->req.length, udc_ep_readl(ep, UDCCSR));
715 
716 	list_del_init(&req->queue);
717 	req->in_use = 0;
718 	if (!is_ep0(ep) && list_empty(&ep->queue))
719 		pio_irq_disable(ep);
720 }
721 
722 /**
723  * req_done - Complete a usb request
724  * @ep: pxa physical endpoint
725  * @req: pxa request
726  * @status: usb request status sent to gadget API
727  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
728  *
729  * Context: ep->lock held if flags not NULL, else ep->lock released
730  *
731  * Retire a pxa27x usb request. Endpoint must be locked.
732  */
733 static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
734 	unsigned long *pflags)
735 {
736 	unsigned long	flags;
737 
738 	ep_del_request(ep, req);
739 	if (likely(req->req.status == -EINPROGRESS))
740 		req->req.status = status;
741 	else
742 		status = req->req.status;
743 
744 	if (status && status != -ESHUTDOWN)
745 		ep_dbg(ep, "complete req %p stat %d len %u/%u\n",
746 			&req->req, status,
747 			req->req.actual, req->req.length);
748 
749 	if (pflags)
750 		spin_unlock_irqrestore(&ep->lock, *pflags);
751 	local_irq_save(flags);
752 	usb_gadget_giveback_request(&req->udc_usb_ep->usb_ep, &req->req);
753 	local_irq_restore(flags);
754 	if (pflags)
755 		spin_lock_irqsave(&ep->lock, *pflags);
756 }
757 
758 /**
759  * ep_end_out_req - Ends endpoint OUT request
760  * @ep: physical endpoint
761  * @req: pxa request
762  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
763  *
764  * Context: ep->lock held or released (see req_done())
765  *
766  * Ends endpoint OUT request (completes usb request).
767  */
768 static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
769 	unsigned long *pflags)
770 {
771 	inc_ep_stats_reqs(ep, !USB_DIR_IN);
772 	req_done(ep, req, 0, pflags);
773 }
774 
775 /**
776  * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
777  * @ep: physical endpoint
778  * @req: pxa request
779  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
780  *
781  * Context: ep->lock held or released (see req_done())
782  *
783  * Ends control endpoint OUT request (completes usb request), and puts
784  * control endpoint into idle state
785  */
786 static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
787 	unsigned long *pflags)
788 {
789 	set_ep0state(ep->dev, OUT_STATUS_STAGE);
790 	ep_end_out_req(ep, req, pflags);
791 	ep0_idle(ep->dev);
792 }
793 
794 /**
795  * ep_end_in_req - Ends endpoint IN request
796  * @ep: physical endpoint
797  * @req: pxa request
798  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
799  *
800  * Context: ep->lock held or released (see req_done())
801  *
802  * Ends endpoint IN request (completes usb request).
803  */
804 static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
805 	unsigned long *pflags)
806 {
807 	inc_ep_stats_reqs(ep, USB_DIR_IN);
808 	req_done(ep, req, 0, pflags);
809 }
810 
811 /**
812  * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
813  * @ep: physical endpoint
814  * @req: pxa request
815  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
816  *
817  * Context: ep->lock held or released (see req_done())
818  *
819  * Ends control endpoint IN request (completes usb request), and puts
820  * control endpoint into status state
821  */
822 static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
823 	unsigned long *pflags)
824 {
825 	set_ep0state(ep->dev, IN_STATUS_STAGE);
826 	ep_end_in_req(ep, req, pflags);
827 }
828 
829 /**
830  * nuke - Dequeue all requests
831  * @ep: pxa endpoint
832  * @status: usb request status
833  *
834  * Context: ep->lock released
835  *
836  * Dequeues all requests on an endpoint. As a side effect, interrupts will be
837  * disabled on that endpoint (because no more requests).
838  */
839 static void nuke(struct pxa_ep *ep, int status)
840 {
841 	struct pxa27x_request	*req;
842 	unsigned long		flags;
843 
844 	spin_lock_irqsave(&ep->lock, flags);
845 	while (!list_empty(&ep->queue)) {
846 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
847 		req_done(ep, req, status, &flags);
848 	}
849 	spin_unlock_irqrestore(&ep->lock, flags);
850 }
851 
852 /**
853  * read_packet - transfer 1 packet from an OUT endpoint into request
854  * @ep: pxa physical endpoint
855  * @req: usb request
856  *
857  * Takes bytes from OUT endpoint and transfers them into the usb request.
858  * If there is less space in request than bytes received in OUT endpoint,
859  * bytes are left in the OUT endpoint.
860  *
861  * Returns how many bytes were actually transferred
862  */
863 static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
864 {
865 	u32 *buf;
866 	int bytes_ep, bufferspace, count, i;
867 
868 	bytes_ep = ep_count_bytes_remain(ep);
869 	bufferspace = req->req.length - req->req.actual;
870 
871 	buf = (u32 *)(req->req.buf + req->req.actual);
872 	prefetchw(buf);
873 
874 	if (likely(!ep_is_empty(ep)))
875 		count = min(bytes_ep, bufferspace);
876 	else /* zlp */
877 		count = 0;
878 
879 	for (i = count; i > 0; i -= 4)
880 		*buf++ = udc_ep_readl(ep, UDCDR);
881 	req->req.actual += count;
882 
883 	ep_write_UDCCSR(ep, UDCCSR_PC);
884 
885 	return count;
886 }
887 
888 /**
889  * write_packet - transfer 1 packet from request into an IN endpoint
890  * @ep: pxa physical endpoint
891  * @req: usb request
892  * @max: max bytes that fit into endpoint
893  *
894  * Takes bytes from usb request, and transfers them into the physical
895  * endpoint. If there are no bytes to transfer, doesn't write anything
896  * to physical endpoint.
897  *
898  * Returns how many bytes were actually transferred.
899  */
900 static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req,
901 			unsigned int max)
902 {
903 	int length, count, remain, i;
904 	u32 *buf;
905 	u8 *buf_8;
906 
907 	buf = (u32 *)(req->req.buf + req->req.actual);
908 	prefetch(buf);
909 
910 	length = min(req->req.length - req->req.actual, max);
911 	req->req.actual += length;
912 
913 	remain = length & 0x3;
914 	count = length & ~(0x3);
915 	for (i = count; i > 0 ; i -= 4)
916 		udc_ep_writel(ep, UDCDR, *buf++);
917 
918 	buf_8 = (u8 *)buf;
919 	for (i = remain; i > 0; i--)
920 		udc_ep_writeb(ep, UDCDR, *buf_8++);
921 
922 	ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain,
923 		udc_ep_readl(ep, UDCCSR));
924 
925 	return length;
926 }
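
/*
 * Worked example for write_packet() above: a 13 byte chunk gives
 * count = 13 & ~3 = 12 (three 32-bit writes to UDCDR) and
 * remain = 13 & 3 = 1 (one byte-wide write), i.e. length = count + remain.
 */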
927 
928 /**
929  * read_fifo - Transfer packets from OUT endpoint into usb request
930  * @ep: pxa physical endpoint
931  * @req: usb request
932  *
933  * Context: callable when in_interrupt()
934  *
935  * Unload as many packets as possible from the fifo we use for usb OUT
936  * transfers and put them into the request. Caller should have made sure
937  * there's at least one packet ready.
938  * Doesn't complete the request; that's the caller's job.
939  *
940  * Returns 1 if the request completed, 0 otherwise
941  */
942 static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
943 {
944 	int count, is_short, completed = 0;
945 
946 	while (epout_has_pkt(ep)) {
947 		count = read_packet(ep, req);
948 		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
949 
950 		is_short = (count < ep->fifo_size);
951 		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
952 			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
953 			&req->req, req->req.actual, req->req.length);
954 
955 		/* completion */
956 		if (is_short || req->req.actual == req->req.length) {
957 			completed = 1;
958 			break;
959 		}
960 		/* finished that packet.  the next one may be waiting... */
961 	}
962 	return completed;
963 }
964 
965 /**
966  * write_fifo - transfer packets from usb request into an IN endpoint
967  * @ep: pxa physical endpoint
968  * @req: pxa usb request
969  *
970  * Write to an IN endpoint fifo, as many packets as possible.
971  * irqs will use this to write the rest later.
972  * caller guarantees at least one packet buffer is ready (or a zlp).
973  * Doesn't complete the request; that's the caller's job.
974  *
975  * Returns 1 if request fully transferred, 0 if partial transfer
976  */
977 static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
978 {
979 	unsigned max;
980 	int count, is_short, is_last = 0, completed = 0, totcount = 0;
981 	u32 udccsr;
982 
983 	max = ep->fifo_size;
984 	do {
985 		is_short = 0;
986 
987 		udccsr = udc_ep_readl(ep, UDCCSR);
988 		if (udccsr & UDCCSR_PC) {
989 			ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
990 				udccsr);
991 			ep_write_UDCCSR(ep, UDCCSR_PC);
992 		}
993 		if (udccsr & UDCCSR_TRN) {
994 			ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
995 				udccsr);
996 			ep_write_UDCCSR(ep, UDCCSR_TRN);
997 		}
998 
999 		count = write_packet(ep, req, max);
1000 		inc_ep_stats_bytes(ep, count, USB_DIR_IN);
1001 		totcount += count;
1002 
1003 		/* last packet is usually short (or a zlp) */
1004 		if (unlikely(count < max)) {
1005 			is_last = 1;
1006 			is_short = 1;
1007 		} else {
1008 			if (likely(req->req.length > req->req.actual)
1009 					|| req->req.zero)
1010 				is_last = 0;
1011 			else
1012 				is_last = 1;
1013 			/* interrupt/iso maxpacket may not fill the fifo */
1014 			is_short = unlikely(max < ep->fifo_size);
1015 		}
1016 
1017 		if (is_short)
1018 			ep_write_UDCCSR(ep, UDCCSR_SP);
1019 
1020 		/* requests complete when all IN data is in the FIFO */
1021 		if (is_last) {
1022 			completed = 1;
1023 			break;
1024 		}
1025 	} while (!ep_is_full(ep));
1026 
1027 	ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n",
1028 			totcount, is_last ? "/L" : "", is_short ? "/S" : "",
1029 			req->req.length - req->req.actual, &req->req);
1030 
1031 	return completed;
1032 }
1033 
1034 /**
1035  * read_ep0_fifo - Transfer packets from control endpoint into usb request
1036  * @ep: control endpoint
1037  * @req: pxa usb request
1038  *
1039  * Special ep0 version of the above read_fifo. Reads as many bytes from control
1040  * endpoint as can be read, and stores them into usb request (limited by request
1041  * maximum length).
1042  *
1043  * Returns 0 if usb request only partially filled, 1 if fully filled
1044  */
1045 static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1046 {
1047 	int count, is_short, completed = 0;
1048 
1049 	while (epout_has_pkt(ep)) {
1050 		count = read_packet(ep, req);
1051 		ep_write_UDCCSR(ep, UDCCSR0_OPC);
1052 		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
1053 
1054 		is_short = (count < ep->fifo_size);
1055 		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
1056 			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
1057 			&req->req, req->req.actual, req->req.length);
1058 
1059 		if (is_short || req->req.actual >= req->req.length) {
1060 			completed = 1;
1061 			break;
1062 		}
1063 	}
1064 
1065 	return completed;
1066 }
1067 
1068 /**
1069  * write_ep0_fifo - Send a request to control endpoint (ep0 in)
1070  * @ep: control endpoint
1071  * @req: request
1072  *
1073  * Context: callable when in_interrupt()
1074  *
1075  * Sends a request (or a part of the request) to the control endpoint (ep0 in).
1076  * If the request doesn't fit, the remaining part will be sent from irq.
1077  * The request is considered fully written only if either:
1078  *   - last write transferred all remaining bytes, but fifo was not fully filled
1079  *   - last write was a 0 length write
1080  *
1081  * Returns 1 if request fully written, 0 if request only partially sent
1082  */
1083 static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1084 {
1085 	unsigned	count;
1086 	int		is_last, is_short;
1087 
1088 	count = write_packet(ep, req, EP0_FIFO_SIZE);
1089 	inc_ep_stats_bytes(ep, count, USB_DIR_IN);
1090 
1091 	is_short = (count < EP0_FIFO_SIZE);
1092 	is_last = ((count == 0) || (count < EP0_FIFO_SIZE));
1093 
1094 	/* Sends either a short packet or a 0 length packet */
1095 	if (unlikely(is_short))
1096 		ep_write_UDCCSR(ep, UDCCSR0_IPR);
1097 
1098 	ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
1099 		count, is_short ? "/S" : "", is_last ? "/L" : "",
1100 		req->req.length - req->req.actual,
1101 		&req->req, udc_ep_readl(ep, UDCCSR));
1102 
1103 	return is_last;
1104 }
1105 
1106 /**
1107  * pxa_ep_queue - Queue a request into an endpoint (IN or OUT)
1108  * @_ep: usb endpoint
1109  * @_req: usb request
1110  * @gfp_flags: flags
1111  *
1112  * Context: normally called when !in_interrupt(), but callable when in_interrupt()
1113  * in the special case of ep0 setup:
1114  *   (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue)
1115  *
1116  * Returns 0 if succeeded, error otherwise
1117  */
1118 static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1119 			gfp_t gfp_flags)
1120 {
1121 	struct udc_usb_ep	*udc_usb_ep;
1122 	struct pxa_ep		*ep;
1123 	struct pxa27x_request	*req;
1124 	struct pxa_udc		*dev;
1125 	unsigned long		flags;
1126 	int			rc = 0;
1127 	int			is_first_req;
1128 	unsigned		length;
1129 	int			recursion_detected;
1130 
1131 	req = container_of(_req, struct pxa27x_request, req);
1132 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1133 
1134 	if (unlikely(!_req || !_req->complete || !_req->buf))
1135 		return -EINVAL;
1136 
1137 	if (unlikely(!_ep))
1138 		return -EINVAL;
1139 
1140 	dev = udc_usb_ep->dev;
1141 	ep = udc_usb_ep->pxa_ep;
1142 	if (unlikely(!ep))
1143 		return -EINVAL;
1144 
1145 	dev = ep->dev;
1146 	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
1147 		ep_dbg(ep, "bogus device state\n");
1148 		return -ESHUTDOWN;
1149 	}
1150 
1151 	/* iso is always one packet per request, that's the only way
1152 	 * we can report per-packet status.  that also helps with dma.
1153 	 */
1154 	if (unlikely(EPXFERTYPE_is_ISO(ep)
1155 			&& req->req.length > ep->fifo_size))
1156 		return -EMSGSIZE;
1157 
1158 	spin_lock_irqsave(&ep->lock, flags);
1159 	recursion_detected = ep->in_handle_ep;
1160 
1161 	is_first_req = list_empty(&ep->queue);
1162 	ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
1163 			_req, is_first_req ? "yes" : "no",
1164 			_req->length, _req->buf);
1165 
1166 	if (!ep->enabled) {
1167 		_req->status = -ESHUTDOWN;
1168 		rc = -ESHUTDOWN;
1169 		goto out_locked;
1170 	}
1171 
1172 	if (req->in_use) {
1173 		ep_err(ep, "refusing to queue req %p (already queued)\n", req);
1174 		goto out_locked;
1175 	}
1176 
1177 	length = _req->length;
1178 	_req->status = -EINPROGRESS;
1179 	_req->actual = 0;
1180 
1181 	ep_add_request(ep, req);
1182 	spin_unlock_irqrestore(&ep->lock, flags);
1183 
1184 	if (is_ep0(ep)) {
1185 		switch (dev->ep0state) {
1186 		case WAIT_ACK_SET_CONF_INTERF:
1187 			if (length == 0) {
1188 				ep_end_in_req(ep, req, NULL);
1189 			} else {
1190 				ep_err(ep, "got a request of %d bytes while "
1191 					"in state WAIT_ACK_SET_CONF_INTERF\n",
1192 					length);
1193 				ep_del_request(ep, req);
1194 				rc = -EL2HLT;
1195 			}
1196 			ep0_idle(ep->dev);
1197 			break;
1198 		case IN_DATA_STAGE:
1199 			if (!ep_is_full(ep))
1200 				if (write_ep0_fifo(ep, req))
1201 					ep0_end_in_req(ep, req, NULL);
1202 			break;
1203 		case OUT_DATA_STAGE:
1204 			if ((length == 0) || !epout_has_pkt(ep))
1205 				if (read_ep0_fifo(ep, req))
1206 					ep0_end_out_req(ep, req, NULL);
1207 			break;
1208 		default:
1209 			ep_err(ep, "odd state %s to send me a request\n",
1210 				EP0_STNAME(ep->dev));
1211 			ep_del_request(ep, req);
1212 			rc = -EL2HLT;
1213 			break;
1214 		}
1215 	} else {
1216 		if (!recursion_detected)
1217 			handle_ep(ep);
1218 	}
1219 
1220 out:
1221 	return rc;
1222 out_locked:
1223 	spin_unlock_irqrestore(&ep->lock, flags);
1224 	goto out;
1225 }
1226 
1227 /**
1228  * pxa_ep_dequeue - Dequeue one request
1229  * @_ep: usb endpoint
1230  * @_req: usb request
1231  *
1232  * Return 0 if no error, -EINVAL or -ECONNRESET otherwise
1233  */
1234 static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1235 {
1236 	struct pxa_ep		*ep;
1237 	struct udc_usb_ep	*udc_usb_ep;
1238 	struct pxa27x_request	*req;
1239 	unsigned long		flags;
1240 	int			rc = -EINVAL;
1241 
1242 	if (!_ep)
1243 		return rc;
1244 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1245 	ep = udc_usb_ep->pxa_ep;
1246 	if (!ep || is_ep0(ep))
1247 		return rc;
1248 
1249 	spin_lock_irqsave(&ep->lock, flags);
1250 
1251 	/* make sure it's actually queued on this endpoint */
1252 	list_for_each_entry(req, &ep->queue, queue) {
1253 		if (&req->req == _req) {
1254 			rc = 0;
1255 			break;
1256 		}
1257 	}
1258 
1259 	spin_unlock_irqrestore(&ep->lock, flags);
1260 	if (!rc)
1261 		req_done(ep, req, -ECONNRESET, NULL);
1262 	return rc;
1263 }
1264 
1265 /**
1266  * pxa_ep_set_halt - Halts operations on one endpoint
1267  * @_ep: usb endpoint
1268  * @value: 1 to set halt (stall), 0 to clear it
1269  *
1270  * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise
1271  */
1272 static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
1273 {
1274 	struct pxa_ep		*ep;
1275 	struct udc_usb_ep	*udc_usb_ep;
1276 	unsigned long flags;
1277 	int rc;
1278 
1279 
1280 	if (!_ep)
1281 		return -EINVAL;
1282 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1283 	ep = udc_usb_ep->pxa_ep;
1284 	if (!ep || is_ep0(ep))
1285 		return -EINVAL;
1286 
1287 	if (value == 0) {
1288 		/*
1289 		 * This path (reset toggle+halt) is needed to implement
1290 		 * SET_INTERFACE on normal hardware, but it can't be
1291 		 * done from software on the PXA UDC, and the hardware
1292 		 * forgets to do it as part of SET_INTERFACE automagic.
1293 		 */
1294 		ep_dbg(ep, "only host can clear halt\n");
1295 		return -EROFS;
1296 	}
1297 
1298 	spin_lock_irqsave(&ep->lock, flags);
1299 
1300 	rc = -EAGAIN;
1301 	if (ep->dir_in	&& (ep_is_full(ep) || !list_empty(&ep->queue)))
1302 		goto out;
1303 
1304 	/* FST, FEF bits are the same for control and non control endpoints */
1305 	rc = 0;
1306 	ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
1307 	if (is_ep0(ep))
1308 		set_ep0state(ep->dev, STALL);
1309 
1310 out:
1311 	spin_unlock_irqrestore(&ep->lock, flags);
1312 	return rc;
1313 }
1314 
1315 /**
1316  * pxa_ep_fifo_status - Get how many bytes in physical endpoint
1317  * @_ep: usb endpoint
1318  *
1319  * Returns number of bytes in OUT fifos. Broken for IN fifos.
1320  */
1321 static int pxa_ep_fifo_status(struct usb_ep *_ep)
1322 {
1323 	struct pxa_ep		*ep;
1324 	struct udc_usb_ep	*udc_usb_ep;
1325 
1326 	if (!_ep)
1327 		return -ENODEV;
1328 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1329 	ep = udc_usb_ep->pxa_ep;
1330 	if (!ep || is_ep0(ep))
1331 		return -ENODEV;
1332 
1333 	if (ep->dir_in)
1334 		return -EOPNOTSUPP;
1335 	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep))
1336 		return 0;
1337 	else
1338 		return ep_count_bytes_remain(ep) + 1;
1339 }
1340 
1341 /**
1342  * pxa_ep_fifo_flush - Flushes one endpoint
1343  * @_ep: usb endpoint
1344  *
1345  * Discards all data in one endpoint (IN or OUT), except the control endpoint.
1346  */
1347 static void pxa_ep_fifo_flush(struct usb_ep *_ep)
1348 {
1349 	struct pxa_ep		*ep;
1350 	struct udc_usb_ep	*udc_usb_ep;
1351 	unsigned long		flags;
1352 
1353 	if (!_ep)
1354 		return;
1355 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1356 	ep = udc_usb_ep->pxa_ep;
1357 	if (!ep || is_ep0(ep))
1358 		return;
1359 
1360 	spin_lock_irqsave(&ep->lock, flags);
1361 
1362 	if (unlikely(!list_empty(&ep->queue)))
1363 		ep_dbg(ep, "called while queue list not empty\n");
1364 	ep_dbg(ep, "called\n");
1365 
1366 	/* for OUT, just read and discard the FIFO contents. */
1367 	if (!ep->dir_in) {
1368 		while (!ep_is_empty(ep))
1369 			udc_ep_readl(ep, UDCDR);
1370 	} else {
1371 		/* most IN status is the same, but ISO can't stall */
1372 		ep_write_UDCCSR(ep,
1373 				UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
1374 				| (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
1375 	}
1376 
1377 	spin_unlock_irqrestore(&ep->lock, flags);
1378 }
1379 
1380 /**
1381  * pxa_ep_enable - Enables usb endpoint
1382  * @_ep: usb endpoint
1383  * @desc: usb endpoint descriptor
1384  *
1385  * Nothing much to do here, as ep configuration is done once and for all
1386  * before udc is enabled. After udc enable, no physical endpoint configuration
1387  * can be changed.
1388  * Function makes sanity checks and flushes the endpoint.
1389  */
1390 static int pxa_ep_enable(struct usb_ep *_ep,
1391 	const struct usb_endpoint_descriptor *desc)
1392 {
1393 	struct pxa_ep		*ep;
1394 	struct udc_usb_ep	*udc_usb_ep;
1395 	struct pxa_udc		*udc;
1396 
1397 	if (!_ep || !desc)
1398 		return -EINVAL;
1399 
1400 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1401 	if (udc_usb_ep->pxa_ep) {
1402 		ep = udc_usb_ep->pxa_ep;
1403 		ep_warn(ep, "usb_ep %s already enabled, doing nothing\n",
1404 			_ep->name);
1405 	} else {
1406 		ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep);
1407 	}
1408 
1409 	if (!ep || is_ep0(ep)) {
1410 		dev_err(udc_usb_ep->dev->dev,
1411 			"unable to match pxa_ep for ep %s\n",
1412 			_ep->name);
1413 		return -EINVAL;
1414 	}
1415 
1416 	if ((desc->bDescriptorType != USB_DT_ENDPOINT)
1417 			|| (ep->type != usb_endpoint_type(desc))) {
1418 		ep_err(ep, "type mismatch\n");
1419 		return -EINVAL;
1420 	}
1421 
1422 	if (ep->fifo_size < usb_endpoint_maxp(desc)) {
1423 		ep_err(ep, "bad maxpacket\n");
1424 		return -ERANGE;
1425 	}
1426 
1427 	udc_usb_ep->pxa_ep = ep;
1428 	udc = ep->dev;
1429 
1430 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
1431 		ep_err(ep, "bogus device state\n");
1432 		return -ESHUTDOWN;
1433 	}
1434 
1435 	ep->enabled = 1;
1436 
1437 	/* flush fifo (mostly for OUT buffers) */
1438 	pxa_ep_fifo_flush(_ep);
1439 
1440 	ep_dbg(ep, "enabled\n");
1441 	return 0;
1442 }
1443 
1444 /**
1445  * pxa_ep_disable - Disable usb endpoint
1446  * @_ep: usb endpoint
1447  *
1448  * Same as for pxa_ep_enable, no physical endpoint configuration can be
1449  * changed.
1450  * Function flushes the endpoint and related requests.
1451  */
1452 static int pxa_ep_disable(struct usb_ep *_ep)
1453 {
1454 	struct pxa_ep		*ep;
1455 	struct udc_usb_ep	*udc_usb_ep;
1456 
1457 	if (!_ep)
1458 		return -EINVAL;
1459 
1460 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1461 	ep = udc_usb_ep->pxa_ep;
1462 	if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
1463 		return -EINVAL;
1464 
1465 	ep->enabled = 0;
1466 	nuke(ep, -ESHUTDOWN);
1467 
1468 	pxa_ep_fifo_flush(_ep);
1469 	udc_usb_ep->pxa_ep = NULL;
1470 
1471 	ep_dbg(ep, "disabled\n");
1472 	return 0;
1473 }
1474 
1475 static struct usb_ep_ops pxa_ep_ops = {
1476 	.enable		= pxa_ep_enable,
1477 	.disable	= pxa_ep_disable,
1478 
1479 	.alloc_request	= pxa_ep_alloc_request,
1480 	.free_request	= pxa_ep_free_request,
1481 
1482 	.queue		= pxa_ep_queue,
1483 	.dequeue	= pxa_ep_dequeue,
1484 
1485 	.set_halt	= pxa_ep_set_halt,
1486 	.fifo_status	= pxa_ep_fifo_status,
1487 	.fifo_flush	= pxa_ep_fifo_flush,
1488 };
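
/*
 * Illustration only: the gadget-side wrappers that dispatch to the ops above,
 * assuming an endpoint "ep" handed out by the gadget/composite framework:
 *
 *	usb_ep_enable(ep);			dispatches to pxa_ep_enable()
 *	usb_ep_queue(ep, req, GFP_ATOMIC);	dispatches to pxa_ep_queue()
 *	usb_ep_dequeue(ep, req);		dispatches to pxa_ep_dequeue()
 *	usb_ep_set_halt(ep);			dispatches to pxa_ep_set_halt()
 *	usb_ep_fifo_flush(ep);			dispatches to pxa_ep_fifo_flush()
 *	usb_ep_disable(ep);			dispatches to pxa_ep_disable()
 */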
1489 
1490 /**
1491  * dplus_pullup - Connect or disconnect pullup resistor to D+ pin
1492  * @udc: udc device
1493  * @on: 1 to connect the pullup resistor, 0 to disconnect it
1494  * Context: any
1495  *
1496  * Handle D+ pullup resistor, make the device visible to the usb bus, and
1497  * declare it as a full speed usb device
1498  */
1499 static void dplus_pullup(struct pxa_udc *udc, int on)
1500 {
1501 	if (udc->gpiod) {
1502 		gpiod_set_value(udc->gpiod, on);
1503 	} else if (udc->udc_command) {
1504 		if (on)
1505 			udc->udc_command(PXA2XX_UDC_CMD_CONNECT);
1506 		else
1507 			udc->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
1508 	}
1509 	udc->pullup_on = on;
1510 }
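
/*
 * Illustration only: a sketch of how a board might provide the pullup control
 * consumed above, either through a gpio descriptor or through the legacy
 * udc_command() hook of the pxa2xx platform data.  MY_DPLUS_GPIO and
 * my_board_udc_command() are hypothetical names.
 *
 *	static void my_board_udc_command(int cmd)
 *	{
 *		gpio_set_value(MY_DPLUS_GPIO, cmd == PXA2XX_UDC_CMD_CONNECT);
 *	}
 */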
1511 
1512 /**
1513  * pxa_udc_get_frame - Returns usb frame number
1514  * @_gadget: usb gadget
1515  */
1516 static int pxa_udc_get_frame(struct usb_gadget *_gadget)
1517 {
1518 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1519 
1520 	return (udc_readl(udc, UDCFNR) & 0x7ff);
1521 }
1522 
1523 /**
1524  * pxa_udc_wakeup - Force udc device out of suspend
1525  * @_gadget: usb gadget
1526  *
1527  * Returns 0 if successful, error code otherwise
1528  */
1529 static int pxa_udc_wakeup(struct usb_gadget *_gadget)
1530 {
1531 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1532 
1533 	/* host may not have enabled remote wakeup */
1534 	if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0)
1535 		return -EHOSTUNREACH;
1536 	udc_set_mask_UDCCR(udc, UDCCR_UDR);
1537 	return 0;
1538 }
1539 
1540 static void udc_enable(struct pxa_udc *udc);
1541 static void udc_disable(struct pxa_udc *udc);
1542 
1543 /**
1544  * should_enable_udc - Tells if UDC should be enabled
1545  * @udc: udc device
1546  * Context: any
1547  *
1548  * The UDC should be enabled if:
1549  *
1550  *  - the pullup resistor is connected
1551  *  - and a gadget driver is bound
1552  *  - and vbus is sensed (or no vbus sense is available)
1553  *
1554  * Returns 1 if UDC should be enabled, 0 otherwise
1555  */
1556 static int should_enable_udc(struct pxa_udc *udc)
1557 {
1558 	int put_on;
1559 
1560 	put_on = ((udc->pullup_on) && (udc->driver));
1561 	put_on &= ((udc->vbus_sensed) || (IS_ERR_OR_NULL(udc->transceiver)));
1562 	return put_on;
1563 }
1564 
1565 /**
1566  * should_disable_udc - Tells if UDC should be disabled
1567  * @udc: udc device
1568  * Context: any
1569  *
1570  * The UDC should be disabled if:
1571  *  - the pullup resistor is not connected
1572  *  - or no gadget driver is bound
1573  *  - or no vbus is sensed (when vbus sensing is available)
1574  *
1575  * Returns 1 if UDC should be disabled
1576  */
1577 static int should_disable_udc(struct pxa_udc *udc)
1578 {
1579 	int put_off;
1580 
1581 	put_off = ((!udc->pullup_on) || (!udc->driver));
1582 	put_off |= ((!udc->vbus_sensed) && (!IS_ERR_OR_NULL(udc->transceiver)));
1583 	return put_off;
1584 }
1585 
1586 /**
1587  * pxa_udc_pullup - Offer manual D+ pullup control
1588  * @_gadget: usb gadget using the control
1589  * @is_active: 0 to disconnect, else connect the D+ pullup resistor
1590  * Context: !in_interrupt()
1591  *
1592  * Returns 0 if OK, -EOPNOTSUPP if udc driver doesn't handle D+ pullup
1593  */
1594 static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active)
1595 {
1596 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1597 
1598 	if (!udc->gpiod && !udc->udc_command)
1599 		return -EOPNOTSUPP;
1600 
1601 	dplus_pullup(udc, is_active);
1602 
1603 	if (should_enable_udc(udc))
1604 		udc_enable(udc);
1605 	if (should_disable_udc(udc))
1606 		udc_disable(udc);
1607 	return 0;
1608 }
1609 
1610 static void udc_enable(struct pxa_udc *udc);
1611 static void udc_disable(struct pxa_udc *udc);
1612 
1613 /**
1614  * pxa_udc_vbus_session - Called by external transceiver to enable/disable udc
1615  * @_gadget: usb gadget
1616  * @is_active: 0 if should disable the udc, 1 if should enable
1617  *
1618  * Enables the udc and optionally activates the D+ pullup resistor, or disables
1619  * the udc and deactivates the D+ pullup resistor.
1620  *
1621  * Returns 0
1622  */
1623 static int pxa_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1624 {
1625 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1626 
1627 	udc->vbus_sensed = is_active;
1628 	if (should_enable_udc(udc))
1629 		udc_enable(udc);
1630 	if (should_disable_udc(udc))
1631 		udc_disable(udc);
1632 
1633 	return 0;
1634 }
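
/*
 * Illustrative call path, assuming an external transceiver driver is bound:
 * the transceiver signals VBUS changes with usb_gadget_vbus_connect() or
 * usb_gadget_vbus_disconnect(), which land in pxa_udc_vbus_session() above
 * with is_active set to 1 or 0 respectively.
 *
 *	usb_gadget_vbus_connect(&udc->gadget);		vbus_session(g, 1)
 *	usb_gadget_vbus_disconnect(&udc->gadget);	vbus_session(g, 0)
 */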
1635 
1636 /**
1637  * pxa_udc_vbus_draw - Called by gadget driver after SET_CONFIGURATION completed
1638  * @_gadget: usb gadget
1639  * @mA: current that can be drawn from VBus, in mA
1640  *
1641  * Context: !in_interrupt()
1642  *
1643  * Called after a configuration was chosen by a USB host, to inform how much
1644  * current can be drawn by the device from VBus line.
1645  *
1646  * Returns 0 or -EOPNOTSUPP if no transceiver is handling the udc
1647  */
1648 static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1649 {
1650 	struct pxa_udc *udc;
1651 
1652 	udc = to_gadget_udc(_gadget);
1653 	if (!IS_ERR_OR_NULL(udc->transceiver))
1654 		return usb_phy_set_power(udc->transceiver, mA);
1655 	return -EOPNOTSUPP;
1656 }
1657 
1658 static int pxa27x_udc_start(struct usb_gadget *g,
1659 		struct usb_gadget_driver *driver);
1660 static int pxa27x_udc_stop(struct usb_gadget *g);
1661 
1662 static const struct usb_gadget_ops pxa_udc_ops = {
1663 	.get_frame	= pxa_udc_get_frame,
1664 	.wakeup		= pxa_udc_wakeup,
1665 	.pullup		= pxa_udc_pullup,
1666 	.vbus_session	= pxa_udc_vbus_session,
1667 	.vbus_draw	= pxa_udc_vbus_draw,
1668 	.udc_start	= pxa27x_udc_start,
1669 	.udc_stop	= pxa27x_udc_stop,
1670 };
1671 
1672 /**
1673  * udc_disable - disable udc device controller
1674  * @udc: udc device
1675  * Context: any
1676  *
1677  * Disables the udc device: disables clocks, udc interrupts, control endpoint
1678  * interrupts.
1679  */
1680 static void udc_disable(struct pxa_udc *udc)
1681 {
1682 	if (!udc->enabled)
1683 		return;
1684 
1685 	udc_writel(udc, UDCICR0, 0);
1686 	udc_writel(udc, UDCICR1, 0);
1687 
1688 	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1689 
1690 	ep0_idle(udc);
1691 	udc->gadget.speed = USB_SPEED_UNKNOWN;
1692 	clk_disable(udc->clk);
1693 
1694 	udc->enabled = 0;
1695 }
1696 
1697 /**
1698  * udc_init_data - Initialize udc device data structures
1699  * @dev: udc device
1700  *
1701  * Initializes gadget endpoint list and endpoint locks. No action is taken
1702  * on the hardware.
1703  */
1704 static void udc_init_data(struct pxa_udc *dev)
1705 {
1706 	int i;
1707 	struct pxa_ep *ep;
1708 
1709 	/* device/ep0 records init */
1710 	INIT_LIST_HEAD(&dev->gadget.ep_list);
1711 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1712 	dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
1713 	ep0_idle(dev);
1714 
1715 	/* PXA endpoints init */
1716 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
1717 		ep = &dev->pxa_ep[i];
1718 
1719 		ep->enabled = is_ep0(ep);
1720 		INIT_LIST_HEAD(&ep->queue);
1721 		spin_lock_init(&ep->lock);
1722 	}
1723 
1724 	/* USB endpoints init */
1725 	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
1726 		list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list,
1727 				&dev->gadget.ep_list);
1728 		usb_ep_set_maxpacket_limit(&dev->udc_usb_ep[i].usb_ep,
1729 					   dev->udc_usb_ep[i].usb_ep.maxpacket);
1730 	}
1731 }
1732 
1733 /**
1734  * udc_enable - Enables the udc device
1735  * @dev: udc device
1736  *
1737  * Enables the udc device: enables clocks, udc interrupts, control endpoint
1738  * interrupts, sets usb as UDC client and sets up endpoints.
1739  */
1740 static void udc_enable(struct pxa_udc *udc)
1741 {
1742 	if (udc->enabled)
1743 		return;
1744 
1745 	clk_enable(udc->clk);
1746 	udc_writel(udc, UDCICR0, 0);
1747 	udc_writel(udc, UDCICR1, 0);
1748 	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1749 
1750 	ep0_idle(udc);
1751 	udc->gadget.speed = USB_SPEED_FULL;
1752 	memset(&udc->stats, 0, sizeof(udc->stats));
1753 
1754 	pxa_eps_setup(udc);
1755 	udc_set_mask_UDCCR(udc, UDCCR_UDE);
1756 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
1757 	udelay(2);
1758 	if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
1759 		dev_err(udc->dev, "Configuration errors, udc disabled\n");
1760 
1761 	/*
1762 	 * Caller must be able to sleep in order to cope with startup transients
1763 	 */
1764 	msleep(100);
1765 
1766 	/* enable suspend/resume and reset irqs */
1767 	udc_writel(udc, UDCICR1,
1768 			UDCICR1_IECC | UDCICR1_IERU
1769 			| UDCICR1_IESU | UDCICR1_IERS);
1770 
1771 	/* enable ep0 irqs */
1772 	pio_irq_enable(&udc->pxa_ep[0]);
1773 
1774 	udc->enabled = 1;
1775 }
1776 
1777 /**
1778  * pxa27x_udc_start - Register gadget driver
1779  * @g: usb gadget
1780  * @driver: gadget driver
1781  *
1782  * When a driver is successfully registered, it will receive control requests
1783  * including set_configuration(), which enables non-control requests.  Then
1784  * usb traffic follows until a disconnect is reported.  Then a host may connect
1785  * again, or the driver might get unbound.
1786  *
1787  * Note that the udc is not automatically enabled. Check function
1788  * should_enable_udc().
1789  *
1790  * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
1791  */
1792 static int pxa27x_udc_start(struct usb_gadget *g,
1793 		struct usb_gadget_driver *driver)
1794 {
1795 	struct pxa_udc *udc = to_pxa(g);
1796 	int retval;
1797 
1798 	/* first hook up the driver ... */
1799 	udc->driver = driver;
1800 
1801 	if (!IS_ERR_OR_NULL(udc->transceiver)) {
1802 		retval = otg_set_peripheral(udc->transceiver->otg,
1803 						&udc->gadget);
1804 		if (retval) {
1805 			dev_err(udc->dev, "can't bind to transceiver\n");
1806 			goto fail;
1807 		}
1808 	}
1809 
1810 	if (should_enable_udc(udc))
1811 		udc_enable(udc);
1812 	return 0;
1813 
1814 fail:
1815 	udc->driver = NULL;
1816 	return retval;
1817 }
1818 
1819 /**
1820  * stop_activity - Stops udc endpoints
1821  * @udc: udc device
1822  * @driver: gadget driver
1823  *
1824  * Disables all udc endpoints (even the control endpoint) and marks the gadget
1825  * as disconnected (speed unknown).
1826  */
1827 static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
1828 {
1829 	int i;
1830 
1831 	/* don't disconnect drivers more than once */
1832 	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
1833 		driver = NULL;
1834 	udc->gadget.speed = USB_SPEED_UNKNOWN;
1835 
1836 	for (i = 0; i < NR_USB_ENDPOINTS; i++)
1837 		pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep);
1838 }
1839 
1840 /**
1841  * pxa27x_udc_stop - Unregister the gadget driver
1842  * @g: usb gadget
1843  *
1844  * Returns 0 if no error, -ENODEV, -EINVAL otherwise
1845  */
1846 static int pxa27x_udc_stop(struct usb_gadget *g)
1847 {
1848 	struct pxa_udc *udc = to_pxa(g);
1849 
1850 	stop_activity(udc, NULL);
1851 	udc_disable(udc);
1852 
1853 	udc->driver = NULL;
1854 
1855 	if (!IS_ERR_OR_NULL(udc->transceiver))
1856 		return otg_set_peripheral(udc->transceiver->otg, NULL);
1857 	return 0;
1858 }
1859 
1860 /**
1861  * handle_ep0_ctrl_req - handle control endpoint control request
1862  * @udc: udc device
1863  * @req: control request
1864  */
1865 static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1866 				struct pxa27x_request *req)
1867 {
1868 	struct pxa_ep *ep = &udc->pxa_ep[0];
1869 	union {
1870 		struct usb_ctrlrequest	r;
1871 		u32			word[2];
1872 	} u;
1873 	int i;
1874 	int have_extrabytes = 0;
1875 	unsigned long flags;
1876 
1877 	nuke(ep, -EPROTO);
1878 	spin_lock_irqsave(&ep->lock, flags);
1879 
1880 	/*
1881 	 * In the PXA320 manual, in the section about Back-to-Back setup
1882 	 * packets, it describes this situation.  The solution is to set OPC to
1883 	 * get rid of the status packet, and then continue with the setup
	 * packet. This handling is generalized to pxa27x CPUs as well.
1885 	 */
1886 	if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
1887 		ep_write_UDCCSR(ep, UDCCSR0_OPC);
1888 
1889 	/* read SETUP packet */
1890 	for (i = 0; i < 2; i++) {
1891 		if (unlikely(ep_is_empty(ep)))
1892 			goto stall;
1893 		u.word[i] = udc_ep_readl(ep, UDCDR);
1894 	}
1895 
1896 	have_extrabytes = !ep_is_empty(ep);
1897 	while (!ep_is_empty(ep)) {
1898 		i = udc_ep_readl(ep, UDCDR);
1899 		ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
1900 	}
1901 
1902 	ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1903 		u.r.bRequestType, u.r.bRequest,
1904 		le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex),
1905 		le16_to_cpu(u.r.wLength));
1906 	if (unlikely(have_extrabytes))
1907 		goto stall;
1908 
1909 	if (u.r.bRequestType & USB_DIR_IN)
1910 		set_ep0state(udc, IN_DATA_STAGE);
1911 	else
1912 		set_ep0state(udc, OUT_DATA_STAGE);
1913 
1914 	/* Tell UDC to enter Data Stage */
1915 	ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
1916 
1917 	spin_unlock_irqrestore(&ep->lock, flags);
1918 	i = udc->driver->setup(&udc->gadget, &u.r);
1919 	spin_lock_irqsave(&ep->lock, flags);
1920 	if (i < 0)
1921 		goto stall;
1922 out:
1923 	spin_unlock_irqrestore(&ep->lock, flags);
1924 	return;
1925 stall:
1926 	ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
1927 		udc_ep_readl(ep, UDCCSR), i);
1928 	ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
1929 	set_ep0state(udc, STALL);
1930 	goto out;
1931 }
1932 
1933 /**
1934  * handle_ep0 - Handle control endpoint data transfers
1935  * @udc: udc device
1936  * @fifo_irq: 1 if triggered by fifo service type irq
1937  * @opc_irq: 1 if triggered by output packet complete type irq
1938  *
1939  * Context : when in_interrupt() or with ep->lock held
1940  *
1941  * Tries to transfer all pending request data into the endpoint and/or
1942  * transfer all pending data in the endpoint into usb requests.
1943  * Handles states of ep0 automata.
1944  *
1945  * PXA27x hardware handles several standard usb control requests without
1946  * driver notification.  The requests fully handled by hardware are :
1947  *  SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
1948  *  GET_STATUS
1949  * The requests handled by hardware, but with irq notification are :
1950  *  SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
1951  * The remaining standard requests really handled by handle_ep0 are :
 *  GET_DESCRIPTOR, SET_DESCRIPTOR, and class/vendor specific requests.
1953  * Requests standardized outside of USB 2.0 chapter 9 are handled more
1954  * uniformly, by gadget drivers.
1955  *
 * The control endpoint state machine is _not_ USB spec compliant; it's barely
 * compliant even with the Intel PXA270 developer's guide.
 * The key points from which this state machine was inferred are :
1959  *   - on every setup token, bit UDCCSR0_SA is raised and held until cleared by
1960  *     software.
1961  *   - on every OUT packet received, UDCCSR0_OPC is raised and held until
1962  *     cleared by software.
1963  *   - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
1964  *     before reading ep0.
1965  *     This is true only for PXA27x. This is not true anymore for PXA3xx family
1966  *     (check Back-to-Back setup packet in developers guide).
1967  *   - irq can be called on a "packet complete" event (opc_irq=1), while
1968  *     UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
1969  *     from experimentation).
1970  *   - as UDCCSR0_SA can be activated while in irq handling, and clearing
1971  *     UDCCSR0_OPC would flush the setup data, we almost never clear UDCCSR0_OPC
1972  *     => we never actually read the "status stage" packet of an IN data stage
1973  *     => this is not documented in Intel documentation
 *   - the hardware has no notion of a STATUS STAGE; it only handles the SETUP
 *     STAGE and DATA STAGE. The driver adds a STATUS STAGE to send the last
 *     zero length packet in OUT_STATUS_STAGE.
 *   - special attention was needed for IN_STATUS_STAGE. If a packet complete
 *     event is detected, we terminate the status stage without acknowledging
 *     the packet (so as not to risk losing a potential SETUP packet)
1980  */
1981 static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
1982 {
1983 	u32			udccsr0;
1984 	struct pxa_ep		*ep = &udc->pxa_ep[0];
1985 	struct pxa27x_request	*req = NULL;
1986 	int			completed = 0;
1987 
1988 	if (!list_empty(&ep->queue))
1989 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
1990 
1991 	udccsr0 = udc_ep_readl(ep, UDCCSR);
1992 	ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
1993 		EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
1994 		(fifo_irq << 1 | opc_irq));
1995 
1996 	if (udccsr0 & UDCCSR0_SST) {
1997 		ep_dbg(ep, "clearing stall status\n");
1998 		nuke(ep, -EPIPE);
1999 		ep_write_UDCCSR(ep, UDCCSR0_SST);
2000 		ep0_idle(udc);
2001 	}
2002 
2003 	if (udccsr0 & UDCCSR0_SA) {
2004 		nuke(ep, 0);
2005 		set_ep0state(udc, SETUP_STAGE);
2006 	}
2007 
2008 	switch (udc->ep0state) {
2009 	case WAIT_FOR_SETUP:
2010 		/*
2011 		 * Hardware bug : beware, we cannot clear OPC, since we would
2012 		 * miss a potential OPC irq for a setup packet.
2013 		 * So, we only do ... nothing, and hope for a next irq with
2014 		 * UDCCSR0_SA set.
2015 		 */
2016 		break;
2017 	case SETUP_STAGE:
2018 		udccsr0 &= UDCCSR0_CTRL_REQ_MASK;
2019 		if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK))
2020 			handle_ep0_ctrl_req(udc, req);
2021 		break;
2022 	case IN_DATA_STAGE:			/* GET_DESCRIPTOR */
2023 		if (epout_has_pkt(ep))
2024 			ep_write_UDCCSR(ep, UDCCSR0_OPC);
2025 		if (req && !ep_is_full(ep))
2026 			completed = write_ep0_fifo(ep, req);
2027 		if (completed)
2028 			ep0_end_in_req(ep, req, NULL);
2029 		break;
2030 	case OUT_DATA_STAGE:			/* SET_DESCRIPTOR */
2031 		if (epout_has_pkt(ep) && req)
2032 			completed = read_ep0_fifo(ep, req);
2033 		if (completed)
2034 			ep0_end_out_req(ep, req, NULL);
2035 		break;
2036 	case STALL:
2037 		ep_write_UDCCSR(ep, UDCCSR0_FST);
2038 		break;
2039 	case IN_STATUS_STAGE:
2040 		/*
2041 		 * Hardware bug : beware, we cannot clear OPC, since we would
2042 		 * miss a potential PC irq for a setup packet.
2043 		 * So, we only put the ep0 into WAIT_FOR_SETUP state.
2044 		 */
2045 		if (opc_irq)
2046 			ep0_idle(udc);
2047 		break;
2048 	case OUT_STATUS_STAGE:
2049 	case WAIT_ACK_SET_CONF_INTERF:
2050 		ep_warn(ep, "should never get in %s state here!!!\n",
2051 				EP0_STNAME(ep->dev));
2052 		ep0_idle(udc);
2053 		break;
2054 	}
2055 }
2056 
2057 /**
 * handle_ep - Handle endpoint data transfers
2059  * @ep: pxa physical endpoint
2060  *
2061  * Tries to transfer all pending request data into the endpoint and/or
2062  * transfer all pending data in the endpoint into usb requests.
2063  *
2064  * Is always called when in_interrupt() and with ep->lock released.
2065  */
2066 static void handle_ep(struct pxa_ep *ep)
2067 {
2068 	struct pxa27x_request	*req;
2069 	int completed;
2070 	u32 udccsr;
2071 	int is_in = ep->dir_in;
2072 	int loop = 0;
2073 	unsigned long		flags;
2074 
2075 	spin_lock_irqsave(&ep->lock, flags);
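	/*
	 * in_handle_ep guards against re-entering the loop below: completing
	 * a request (ep_end_in_req/ep_end_out_req) is expected to drop
	 * ep->lock around the gadget's ->complete() callback, which may queue
	 * a new request and, depending on the gadget driver, end up calling
	 * handle_ep() again.
	 */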
2076 	if (ep->in_handle_ep)
2077 		goto recursion_detected;
2078 	ep->in_handle_ep = 1;
2079 
2080 	do {
2081 		completed = 0;
2082 		udccsr = udc_ep_readl(ep, UDCCSR);
2083 
2084 		if (likely(!list_empty(&ep->queue)))
2085 			req = list_entry(ep->queue.next,
2086 					struct pxa27x_request, queue);
2087 		else
2088 			req = NULL;
2089 
2090 		ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n",
2091 				req, udccsr, loop++);
2092 
2093 		if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN)))
2094 			udc_ep_writel(ep, UDCCSR,
2095 					udccsr & (UDCCSR_SST | UDCCSR_TRN));
2096 		if (!req)
2097 			break;
2098 
2099 		if (unlikely(is_in)) {
2100 			if (likely(!ep_is_full(ep)))
2101 				completed = write_fifo(ep, req);
2102 		} else {
2103 			if (likely(epout_has_pkt(ep)))
2104 				completed = read_fifo(ep, req);
2105 		}
2106 
2107 		if (completed) {
2108 			if (is_in)
2109 				ep_end_in_req(ep, req, &flags);
2110 			else
2111 				ep_end_out_req(ep, req, &flags);
2112 		}
2113 	} while (completed);
2114 
2115 	ep->in_handle_ep = 0;
2116 recursion_detected:
2117 	spin_unlock_irqrestore(&ep->lock, flags);
2118 }
2119 
2120 /**
2121  * pxa27x_change_configuration - Handle SET_CONF usb request notification
2122  * @udc: udc device
2123  * @config: usb configuration
2124  *
 * Posts the request to the upper level (gadget driver).
 * Doesn't use any pxa specific hardware configuration capabilities.
2127  */
2128 static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
2129 {
	struct usb_ctrlrequest req;
2131 
2132 	dev_dbg(udc->dev, "config=%d\n", config);
2133 
2134 	udc->config = config;
2135 	udc->last_interface = 0;
2136 	udc->last_alternate = 0;
2137 
2138 	req.bRequestType = 0;
2139 	req.bRequest = USB_REQ_SET_CONFIGURATION;
2140 	req.wValue = config;
2141 	req.wIndex = 0;
2142 	req.wLength = 0;
2143 
2144 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2145 	udc->driver->setup(&udc->gadget, &req);
2146 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2147 }
2148 
2149 /**
2150  * pxa27x_change_interface - Handle SET_INTERF usb request notification
2151  * @udc: udc device
2152  * @iface: interface number
2153  * @alt: alternate setting number
2154  *
 * Posts the request to the upper level (gadget driver).
 * Doesn't use any pxa specific hardware configuration capabilities.
2157  */
2158 static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
2159 {
2160 	struct usb_ctrlrequest  req;
2161 
2162 	dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt);
2163 
2164 	udc->last_interface = iface;
2165 	udc->last_alternate = alt;
2166 
2167 	req.bRequestType = USB_RECIP_INTERFACE;
2168 	req.bRequest = USB_REQ_SET_INTERFACE;
2169 	req.wValue = alt;
2170 	req.wIndex = iface;
2171 	req.wLength = 0;
2172 
2173 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2174 	udc->driver->setup(&udc->gadget, &req);
2175 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2176 }
2177 
2178 /*
2179  * irq_handle_data - Handle data transfer
2180  * @irq: irq IRQ number
2181  * @udc: dev pxa_udc device structure
2182  *
2183  * Called from irq handler, transferts data to or from endpoint to queue
2184  */
2185 static void irq_handle_data(int irq, struct pxa_udc *udc)
2186 {
2187 	int i;
2188 	struct pxa_ep *ep;
2189 	u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK;
2190 	u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK;
2191 
2192 	if (udcisr0 & UDCISR_INT_MASK) {
2193 		udc->pxa_ep[0].stats.irqs++;
2194 		udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK));
2195 		handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR),
2196 				!!(udcisr0 & UDCICR_PKTCOMPL));
2197 	}
2198 
2199 	udcisr0 >>= 2;
2200 	for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) {
2201 		if (!(udcisr0 & UDCISR_INT_MASK))
2202 			continue;
2203 
2204 		udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
2205 
2206 		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2207 		if (i < ARRAY_SIZE(udc->pxa_ep)) {
2208 			ep = &udc->pxa_ep[i];
2209 			ep->stats.irqs++;
2210 			handle_ep(ep);
2211 		}
2212 	}
2213 
2214 	for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
2215 		udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK));
2216 		if (!(udcisr1 & UDCISR_INT_MASK))
2217 			continue;
2218 
2219 		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2220 		if (i < ARRAY_SIZE(udc->pxa_ep)) {
2221 			ep = &udc->pxa_ep[i];
2222 			ep->stats.irqs++;
2223 			handle_ep(ep);
2224 		}
2225 	}
2226 
2227 }
2228 
2229 /**
2230  * irq_udc_suspend - Handle IRQ "UDC Suspend"
2231  * @udc: udc device
2232  */
2233 static void irq_udc_suspend(struct pxa_udc *udc)
2234 {
2235 	udc_writel(udc, UDCISR1, UDCISR1_IRSU);
2236 	udc->stats.irqs_suspend++;
2237 
2238 	if (udc->gadget.speed != USB_SPEED_UNKNOWN
2239 			&& udc->driver && udc->driver->suspend)
2240 		udc->driver->suspend(&udc->gadget);
2241 	ep0_idle(udc);
2242 }
2243 
2244 /**
 * irq_udc_resume - Handle IRQ "UDC Resume"
 * @udc: udc device
 */
2248 static void irq_udc_resume(struct pxa_udc *udc)
2249 {
2250 	udc_writel(udc, UDCISR1, UDCISR1_IRRU);
2251 	udc->stats.irqs_resume++;
2252 
2253 	if (udc->gadget.speed != USB_SPEED_UNKNOWN
2254 			&& udc->driver && udc->driver->resume)
2255 		udc->driver->resume(&udc->gadget);
2256 }
2257 
2258 /**
2259  * irq_udc_reconfig - Handle IRQ "UDC Change Configuration"
2260  * @udc: udc device
2261  */
2262 static void irq_udc_reconfig(struct pxa_udc *udc)
2263 {
2264 	unsigned config, interface, alternate, config_change;
2265 	u32 udccr = udc_readl(udc, UDCCR);
2266 
2267 	udc_writel(udc, UDCISR1, UDCISR1_IRCC);
2268 	udc->stats.irqs_reconfig++;
2269 
2270 	config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S;
2271 	config_change = (config != udc->config);
2272 	pxa27x_change_configuration(udc, config);
2273 
2274 	interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S;
2275 	alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S;
2276 	pxa27x_change_interface(udc, interface, alternate);
2277 
2278 	if (config_change)
2279 		update_pxa_ep_matches(udc);
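	/*
	 * UDCCR_SMAC ("switch endpoint memory to active configuration", per
	 * the PXA27x developer's guide) asks the controller to remap its
	 * endpoint memory to the configuration/interface/alternate setting
	 * just reported above.
	 */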
2280 	udc_set_mask_UDCCR(udc, UDCCR_SMAC);
2281 }
2282 
2283 /**
2284  * irq_udc_reset - Handle IRQ "UDC Reset"
2285  * @udc: udc device
2286  */
2287 static void irq_udc_reset(struct pxa_udc *udc)
2288 {
2289 	u32 udccr = udc_readl(udc, UDCCR);
2290 	struct pxa_ep *ep = &udc->pxa_ep[0];
2291 
2292 	dev_info(udc->dev, "USB reset\n");
2293 	udc_writel(udc, UDCISR1, UDCISR1_IRRS);
2294 	udc->stats.irqs_reset++;
2295 
2296 	if ((udccr & UDCCR_UDA) == 0) {
2297 		dev_dbg(udc->dev, "USB reset start\n");
2298 		stop_activity(udc, udc->driver);
2299 	}
2300 	udc->gadget.speed = USB_SPEED_FULL;
	memset(&udc->stats, 0, sizeof(udc->stats));
2302 
2303 	nuke(ep, -EPROTO);
2304 	ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
2305 	ep0_idle(udc);
2306 }
2307 
2308 /**
2309  * pxa_udc_irq - Main irq handler
2310  * @irq: irq number
2311  * @_dev: udc device
2312  *
2313  * Handles all udc interrupts
2314  */
2315 static irqreturn_t pxa_udc_irq(int irq, void *_dev)
2316 {
2317 	struct pxa_udc *udc = _dev;
2318 	u32 udcisr0 = udc_readl(udc, UDCISR0);
2319 	u32 udcisr1 = udc_readl(udc, UDCISR1);
2320 	u32 udccr = udc_readl(udc, UDCCR);
2321 	u32 udcisr1_spec;
2322 
2323 	dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, "
2324 		 "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr);
2325 
2326 	udcisr1_spec = udcisr1 & 0xf8000000;
2327 	if (unlikely(udcisr1_spec & UDCISR1_IRSU))
2328 		irq_udc_suspend(udc);
2329 	if (unlikely(udcisr1_spec & UDCISR1_IRRU))
2330 		irq_udc_resume(udc);
2331 	if (unlikely(udcisr1_spec & UDCISR1_IRCC))
2332 		irq_udc_reconfig(udc);
2333 	if (unlikely(udcisr1_spec & UDCISR1_IRRS))
2334 		irq_udc_reset(udc);
2335 
2336 	if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK))
2337 		irq_handle_data(irq, udc);
2338 
2339 	return IRQ_HANDLED;
2340 }
2341 
2342 static struct pxa_udc memory = {
2343 	.gadget = {
2344 		.ops		= &pxa_udc_ops,
2345 		.ep0		= &memory.udc_usb_ep[0].usb_ep,
2346 		.name		= driver_name,
2347 		.dev = {
2348 			.init_name	= "gadget",
2349 		},
2350 	},
2351 
2352 	.udc_usb_ep = {
2353 		USB_EP_CTRL,
2354 		USB_EP_OUT_BULK(1),
2355 		USB_EP_IN_BULK(2),
2356 		USB_EP_IN_ISO(3),
2357 		USB_EP_OUT_ISO(4),
2358 		USB_EP_IN_INT(5),
2359 	},
2360 
2361 	.pxa_ep = {
2362 		PXA_EP_CTRL,
2363 		/* Endpoints for gadget zero */
2364 		PXA_EP_OUT_BULK(1, 1, 3, 0, 0),
2365 		PXA_EP_IN_BULK(2,  2, 3, 0, 0),
2366 		/* Endpoints for ether gadget, file storage gadget */
2367 		PXA_EP_OUT_BULK(3, 1, 1, 0, 0),
2368 		PXA_EP_IN_BULK(4,  2, 1, 0, 0),
2369 		PXA_EP_IN_ISO(5,   3, 1, 0, 0),
2370 		PXA_EP_OUT_ISO(6,  4, 1, 0, 0),
2371 		PXA_EP_IN_INT(7,   5, 1, 0, 0),
2372 		/* Endpoints for RNDIS, serial */
2373 		PXA_EP_OUT_BULK(8, 1, 2, 0, 0),
2374 		PXA_EP_IN_BULK(9,  2, 2, 0, 0),
2375 		PXA_EP_IN_INT(10,  5, 2, 0, 0),
2376 		/*
2377 		 * All the following endpoints are only for completion.  They
2378 		 * won't never work, as multiple interfaces are really broken on
2379 		 * the pxa.
2380 		*/
2381 		PXA_EP_OUT_BULK(11, 1, 2, 1, 0),
2382 		PXA_EP_IN_BULK(12,  2, 2, 1, 0),
2383 		/* Endpoint for CDC Ether */
2384 		PXA_EP_OUT_BULK(13, 1, 1, 1, 1),
2385 		PXA_EP_IN_BULK(14,  2, 1, 1, 1),
2386 	}
2387 };
2388 
2389 #if defined(CONFIG_OF)
2390 static const struct of_device_id udc_pxa_dt_ids[] = {
2391 	{ .compatible = "marvell,pxa270-udc" },
2392 	{}
2393 };
2394 MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
2395 #endif
2396 
2397 /**
2398  * pxa_udc_probe - probes the udc device
 * @pdev: platform device
 *
 * Performs basic init : ioremaps the registers, gets and prepares the udc
 * clock, creates the debugfs entries and requests the irq.
2403  */
2404 static int pxa_udc_probe(struct platform_device *pdev)
2405 {
2406 	struct resource *regs;
2407 	struct pxa_udc *udc = &memory;
2408 	int retval = 0, gpio;
2409 	struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
2410 	unsigned long gpio_flags;
2411 
2412 	if (mach) {
2413 		gpio_flags = mach->gpio_pullup_inverted ? GPIOF_ACTIVE_LOW : 0;
2414 		gpio = mach->gpio_pullup;
2415 		if (gpio_is_valid(gpio)) {
2416 			retval = devm_gpio_request_one(&pdev->dev, gpio,
2417 						       gpio_flags,
2418 						       "USB D+ pullup");
2419 			if (retval)
2420 				return retval;
2421 			udc->gpiod = gpio_to_desc(mach->gpio_pullup);
2422 		}
2423 		udc->udc_command = mach->udc_command;
2424 	} else {
2425 		udc->gpiod = devm_gpiod_get(&pdev->dev, NULL);
2426 	}
2427 
2428 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2429 	udc->regs = devm_ioremap_resource(&pdev->dev, regs);
2430 	if (IS_ERR(udc->regs))
2431 		return PTR_ERR(udc->regs);
2432 	udc->irq = platform_get_irq(pdev, 0);
2433 	if (udc->irq < 0)
2434 		return udc->irq;
2435 
2436 	udc->dev = &pdev->dev;
2437 	udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
2438 
2439 	if (IS_ERR(udc->gpiod)) {
2440 		dev_err(&pdev->dev, "Couldn't find or request D+ gpio : %ld\n",
2441 			PTR_ERR(udc->gpiod));
2442 		return PTR_ERR(udc->gpiod);
2443 	}
2444 	if (udc->gpiod)
2445 		gpiod_direction_output(udc->gpiod, 0);
2446 
2447 	udc->clk = devm_clk_get(&pdev->dev, NULL);
2448 	if (IS_ERR(udc->clk))
2449 		return PTR_ERR(udc->clk);
2450 
2451 	retval = clk_prepare(udc->clk);
2452 	if (retval)
2453 		return retval;
2454 
2455 	udc->vbus_sensed = 0;
2456 
2457 	the_controller = udc;
2458 	platform_set_drvdata(pdev, udc);
2459 	udc_init_data(udc);
2460 
2461 	/* irq setup after old hardware state is cleaned up */
2462 	retval = devm_request_irq(&pdev->dev, udc->irq, pxa_udc_irq,
2463 				  IRQF_SHARED, driver_name, udc);
2464 	if (retval != 0) {
2465 		dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
2466 			driver_name, udc->irq, retval);
2467 		goto err;
2468 	}
2469 
2470 	retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
2471 	if (retval)
2472 		goto err;
2473 
2474 	pxa_init_debugfs(udc);
2475 	if (should_enable_udc(udc))
2476 		udc_enable(udc);
2477 	return 0;
2478 err:
2479 	clk_unprepare(udc->clk);
2480 	return retval;
2481 }
2482 
2483 /**
2484  * pxa_udc_remove - removes the udc device driver
2485  * @_dev: platform device
2486  */
2487 static int pxa_udc_remove(struct platform_device *_dev)
2488 {
2489 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2490 
2491 	usb_del_gadget_udc(&udc->gadget);
2492 	pxa_cleanup_debugfs(udc);
2493 
2494 	usb_put_phy(udc->transceiver);
2495 
2496 	udc->transceiver = NULL;
2497 	the_controller = NULL;
2498 	clk_unprepare(udc->clk);
2499 
2500 	return 0;
2501 }
2502 
2503 static void pxa_udc_shutdown(struct platform_device *_dev)
2504 {
2505 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2506 
2507 	if (udc_readl(udc, UDCCR) & UDCCR_UDE)
2508 		udc_disable(udc);
2509 }
2510 
2511 #ifdef CONFIG_PXA27x
2512 extern void pxa27x_clear_otgph(void);
2513 #else
2514 #define pxa27x_clear_otgph()   do {} while (0)
2515 #endif
2516 
2517 #ifdef CONFIG_PM
2518 /**
2519  * pxa_udc_suspend - Suspend udc device
2520  * @_dev: platform device
2521  * @state: suspend state
2522  *
 * Suspends udc : saves the ep0 control/status register (UDCCSR0), then
 * disables the udc device.
2525  */
2526 static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
2527 {
2528 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2529 	struct pxa_ep *ep;
2530 
2531 	ep = &udc->pxa_ep[0];
2532 	udc->udccsr0 = udc_ep_readl(ep, UDCCSR);
2533 
2534 	udc_disable(udc);
2535 	udc->pullup_resume = udc->pullup_on;
2536 	dplus_pullup(udc, 0);
2537 
2538 	return 0;
2539 }
2540 
2541 /**
2542  * pxa_udc_resume - Resume udc device
2543  * @_dev: platform device
2544  *
 * Resumes udc : restores the saved ep0 control/status register bits (UDCCSR0),
 * then enables the udc device.
2547  */
2548 static int pxa_udc_resume(struct platform_device *_dev)
2549 {
2550 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2551 	struct pxa_ep *ep;
2552 
2553 	ep = &udc->pxa_ep[0];
2554 	udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));
2555 
2556 	dplus_pullup(udc, udc->pullup_resume);
2557 	if (should_enable_udc(udc))
2558 		udc_enable(udc);
2559 	/*
2560 	 * We do not handle OTG yet.
2561 	 *
2562 	 * OTGPH bit is set when sleep mode is entered.
	 * It indicates that the OTG pad is retaining its state.
2564 	 * Upon exit from sleep mode and before clearing OTGPH,
2565 	 * Software must configure the USB OTG pad, UDC, and UHC
2566 	 * to the state they were in before entering sleep mode.
2567 	 */
2568 	pxa27x_clear_otgph();
2569 
2570 	return 0;
2571 }
2572 #endif
2573 
2574 /* work with hotplug and coldplug */
2575 MODULE_ALIAS("platform:pxa27x-udc");
2576 
2577 static struct platform_driver udc_driver = {
2578 	.driver		= {
2579 		.name	= "pxa27x-udc",
2580 		.of_match_table = of_match_ptr(udc_pxa_dt_ids),
2581 	},
2582 	.probe		= pxa_udc_probe,
2583 	.remove		= pxa_udc_remove,
2584 	.shutdown	= pxa_udc_shutdown,
2585 #ifdef CONFIG_PM
2586 	.suspend	= pxa_udc_suspend,
2587 	.resume		= pxa_udc_resume
2588 #endif
2589 };
2590 
2591 module_platform_driver(udc_driver);
2592 
2593 MODULE_DESCRIPTION(DRIVER_DESC);
2594 MODULE_AUTHOR("Robert Jarzmik");
2595 MODULE_LICENSE("GPL");
2596