xref: /linux/drivers/usb/chipidea/udc.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
1 /*
2  * udc.c - ChipIdea UDC driver
3  *
4  * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
5  *
6  * Author: David Lopo
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/dmapool.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/init.h>
18 #include <linux/platform_device.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/irq.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/usb/ch9.h>
27 #include <linux/usb/gadget.h>
28 #include <linux/usb/otg.h>
29 #include <linux/usb/chipidea.h>
30 
31 #include "ci.h"
32 #include "udc.h"
33 #include "bits.h"
34 #include "debug.h"
35 
36 /* control endpoint description */
37 static const struct usb_endpoint_descriptor
38 ctrl_endpt_out_desc = {
39 	.bLength         = USB_DT_ENDPOINT_SIZE,
40 	.bDescriptorType = USB_DT_ENDPOINT,
41 
42 	.bEndpointAddress = USB_DIR_OUT,
43 	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
44 	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
45 };
46 
47 static const struct usb_endpoint_descriptor
48 ctrl_endpt_in_desc = {
49 	.bLength         = USB_DT_ENDPOINT_SIZE,
50 	.bDescriptorType = USB_DT_ENDPOINT,
51 
52 	.bEndpointAddress = USB_DIR_IN,
53 	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
54 	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
55 };
56 
57 /**
58  * hw_ep_bit: calculates the bit number
59  * @num: endpoint number
60  * @dir: endpoint direction
61  *
62  * This function returns the bit number used in the ENDPT* registers
63  */
64 static inline int hw_ep_bit(int num, int dir)
65 {
66 	return num + (dir ? 16 : 0);
67 }
68 
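/**
 * ep_to_bit: endpoint array index to ENDPT* register bit
 * @udc: UDC descriptor
 * @n:   index into the ci13xxx_ep array (OUT endpoints first, then IN)
 *
 * OUT endpoints map to the lower 16 bits and IN endpoints to the upper
 * 16 bits of the ENDPT* registers, so IN indices are shifted past the
 * unimplemented endpoint slots.
 */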
69 static inline int ep_to_bit(struct ci13xxx *udc, int n)
70 {
71 	int fill = 16 - udc->hw_ep_max / 2;
72 
73 	if (n >= udc->hw_ep_max / 2)
74 		n += fill;
75 
76 	return n;
77 }
78 
79 /**
80  * hw_device_state: enables/disables interrupts & starts/stops device (execute
81  *                  without interruption)
82  * @dma: 0 => disable, !0 => enable and set dma engine
83  *
84  * This function returns an error code
85  */
86 static int hw_device_state(struct ci13xxx *udc, u32 dma)
87 {
88 	if (dma) {
89 		hw_write(udc, OP_ENDPTLISTADDR, ~0, dma);
90 		/* interrupt, error, port change, reset, sleep/suspend */
91 		hw_write(udc, OP_USBINTR, ~0,
92 			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
93 		hw_write(udc, OP_USBCMD, USBCMD_RS, USBCMD_RS);
94 	} else {
95 		hw_write(udc, OP_USBCMD, USBCMD_RS, 0);
96 		hw_write(udc, OP_USBINTR, ~0, 0);
97 	}
98 	return 0;
99 }
100 
101 /**
102  * hw_ep_flush: flush endpoint fifo (execute without interruption)
103  * @num: endpoint number
104  * @dir: endpoint direction
105  *
106  * This function returns an error code
107  */
108 static int hw_ep_flush(struct ci13xxx *udc, int num, int dir)
109 {
110 	int n = hw_ep_bit(num, dir);
111 
112 	do {
113 		/* flush any pending transfer */
114 		hw_write(udc, OP_ENDPTFLUSH, BIT(n), BIT(n));
115 		while (hw_read(udc, OP_ENDPTFLUSH, BIT(n)))
116 			cpu_relax();
117 	} while (hw_read(udc, OP_ENDPTSTAT, BIT(n)));
118 
119 	return 0;
120 }
121 
122 /**
123  * hw_ep_disable: disables endpoint (execute without interruption)
124  * @num: endpoint number
125  * @dir: endpoint direction
126  *
127  * This function returns an error code
128  */
129 static int hw_ep_disable(struct ci13xxx *udc, int num, int dir)
130 {
131 	hw_ep_flush(udc, num, dir);
132 	hw_write(udc, OP_ENDPTCTRL + num,
133 		 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
134 	return 0;
135 }
136 
137 /**
138  * hw_ep_enable: enables endpoint (execute without interruption)
139  * @num:  endpoint number
140  * @dir:  endpoint direction
141  * @type: endpoint type
142  *
143  * This function returns an error code
144  */
145 static int hw_ep_enable(struct ci13xxx *udc, int num, int dir, int type)
146 {
147 	u32 mask, data;
148 
149 	if (dir) {
150 		mask  = ENDPTCTRL_TXT;  /* type    */
151 		data  = type << ffs_nr(mask);
152 
153 		mask |= ENDPTCTRL_TXS;  /* unstall */
154 		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
155 		data |= ENDPTCTRL_TXR;
156 		mask |= ENDPTCTRL_TXE;  /* enable  */
157 		data |= ENDPTCTRL_TXE;
158 	} else {
159 		mask  = ENDPTCTRL_RXT;  /* type    */
160 		data  = type << ffs_nr(mask);
161 
162 		mask |= ENDPTCTRL_RXS;  /* unstall */
163 		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
164 		data |= ENDPTCTRL_RXR;
165 		mask |= ENDPTCTRL_RXE;  /* enable  */
166 		data |= ENDPTCTRL_RXE;
167 	}
168 	hw_write(udc, OP_ENDPTCTRL + num, mask, data);
169 	return 0;
170 }
171 
172 /**
173  * hw_ep_get_halt: return endpoint halt status
174  * @num: endpoint number
175  * @dir: endpoint direction
176  *
177  * This function returns 1 if the endpoint is halted
178  */
179 static int hw_ep_get_halt(struct ci13xxx *udc, int num, int dir)
180 {
181 	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
182 
183 	return hw_read(udc, OP_ENDPTCTRL + num, mask) ? 1 : 0;
184 }
185 
186 /**
187  * hw_test_and_clear_setup_status: test & clear setup status (execute without
188  *                                 interruption)
189  * @n: endpoint number
190  *
191  * This function returns setup status
192  */
193 static int hw_test_and_clear_setup_status(struct ci13xxx *udc, int n)
194 {
195 	n = ep_to_bit(udc, n);
196 	return hw_test_and_clear(udc, OP_ENDPTSETUPSTAT, BIT(n));
197 }
198 
199 /**
200  * hw_ep_prime: primes endpoint (execute without interruption)
201  * @num:     endpoint number
202  * @dir:     endpoint direction
203  * @is_ctrl: true if control endpoint
204  *
205  * This function returns an error code
206  */
207 static int hw_ep_prime(struct ci13xxx *udc, int num, int dir, int is_ctrl)
208 {
209 	int n = hw_ep_bit(num, dir);
210 
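	/* a freshly received setup packet on a control OUT endpoint aborts the prime */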
211 	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
212 		return -EAGAIN;
213 
214 	hw_write(udc, OP_ENDPTPRIME, BIT(n), BIT(n));
215 
216 	while (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
217 		cpu_relax();
218 	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
219 		return -EAGAIN;
220 
221 	/* status should be tested according to the manual, but it doesn't work */
222 	return 0;
223 }
224 
225 /**
226  * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
227  *                 without interruption)
228  * @num:   endpoint number
229  * @dir:   endpoint direction
230  * @value: true => stall, false => unstall
231  *
232  * This function returns an error code
233  */
234 static int hw_ep_set_halt(struct ci13xxx *udc, int num, int dir, int value)
235 {
236 	if (value != 0 && value != 1)
237 		return -EINVAL;
238 
239 	do {
240 		enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
241 		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
242 		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
243 
244 		/* data toggle - reserved for EP0 but it's in ESS */
245 		hw_write(udc, reg, mask_xs|mask_xr,
246 			  value ? mask_xs : mask_xr);
247 	} while (value != hw_ep_get_halt(udc, num, dir));
248 
249 	return 0;
250 }
251 
252 /**
253  * hw_port_is_high_speed: test if port is high speed
254  *
255  * This function returns true if the port operates at high speed
256  */
257 static int hw_port_is_high_speed(struct ci13xxx *udc)
258 {
259 	return udc->hw_bank.lpm ? hw_read(udc, OP_DEVLC, DEVLC_PSPD) :
260 		hw_read(udc, OP_PORTSC, PORTSC_HSP);
261 }
262 
263 /**
264  * hw_read_intr_enable: returns interrupt enable register
265  *
266  * This function returns register data
267  */
268 static u32 hw_read_intr_enable(struct ci13xxx *udc)
269 {
270 	return hw_read(udc, OP_USBINTR, ~0);
271 }
272 
273 /**
274  * hw_read_intr_status: returns interrupt status register
275  *
276  * This function returns register data
277  */
278 static u32 hw_read_intr_status(struct ci13xxx *udc)
279 {
280 	return hw_read(udc, OP_USBSTS, ~0);
281 }
282 
283 /**
284  * hw_test_and_clear_complete: test & clear complete status (execute without
285  *                             interruption)
286  * @n: endpoint number
287  *
288  * This function returns complete status
289  */
290 static int hw_test_and_clear_complete(struct ci13xxx *udc, int n)
291 {
292 	n = ep_to_bit(udc, n);
293 	return hw_test_and_clear(udc, OP_ENDPTCOMPLETE, BIT(n));
294 }
295 
296 /**
297  * hw_test_and_clear_intr_active: test & clear active interrupts (execute
298  *                                without interruption)
299  *
300  * This function returns the active interrupts
301  */
302 static u32 hw_test_and_clear_intr_active(struct ci13xxx *udc)
303 {
304 	u32 reg = hw_read_intr_status(udc) & hw_read_intr_enable(udc);
305 
306 	hw_write(udc, OP_USBSTS, ~0, reg);
307 	return reg;
308 }
309 
310 /**
311  * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
312  *                                interruption)
313  *
314  * This function returns guard value
315  */
316 static int hw_test_and_clear_setup_guard(struct ci13xxx *udc)
317 {
318 	return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, 0);
319 }
320 
321 /**
322  * hw_test_and_set_setup_guard: test & set setup guard (execute without
323  *                              interruption)
324  *
325  * This function returns guard value
326  */
327 static int hw_test_and_set_setup_guard(struct ci13xxx *udc)
328 {
329 	return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
330 }
331 
332 /**
333  * hw_usb_set_address: configures USB address (execute without interruption)
334  * @value: new USB address
335  *
336  * This function explicitly sets the address, without the "USBADRA" (advance)
337  * feature, which is not supported by older versions of the controller.
338  */
339 static void hw_usb_set_address(struct ci13xxx *udc, u8 value)
340 {
341 	hw_write(udc, OP_DEVICEADDR, DEVICEADDR_USBADR,
342 		 value << ffs_nr(DEVICEADDR_USBADR));
343 }
344 
345 /**
346  * hw_usb_reset: restart device after a bus reset (execute without
347  *               interruption)
348  *
349  * This function returns an error code
350  */
351 static int hw_usb_reset(struct ci13xxx *udc)
352 {
353 	hw_usb_set_address(udc, 0);
354 
355 	/* ESS flushes only at end?!? */
356 	hw_write(udc, OP_ENDPTFLUSH,    ~0, ~0);
357 
358 	/* clear setup token semaphores */
359 	hw_write(udc, OP_ENDPTSETUPSTAT, 0,  0);
360 
361 	/* clear complete status */
362 	hw_write(udc, OP_ENDPTCOMPLETE,  0,  0);
363 
364 	/* wait until all bits cleared */
365 	while (hw_read(udc, OP_ENDPTPRIME, ~0))
366 		udelay(10);             /* not RTOS friendly */
367 
368 	/* reset all endpoints ? */
369 
370 	/* reset internal status and wait for further instructions;
371 	   no need to verify the port reset status (the ESS does it) */
372 
373 	return 0;
374 }
375 
376 /******************************************************************************
377  * UTIL block
378  *****************************************************************************/
379 /**
380  * _usb_addr: calculates endpoint address from direction & number
381  * @ep:  endpoint
382  */
383 static inline u8 _usb_addr(struct ci13xxx_ep *ep)
384 {
385 	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
386 }
387 
388 /**
389  * _hardware_enqueue: configures a request at hardware level
390  * @mEp:  endpoint
391  * @mReq: request
392  *
393  * This function returns an error code
394  */
395 static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
396 {
397 	struct ci13xxx *udc = mEp->udc;
398 	unsigned i;
399 	int ret = 0;
400 	unsigned length = mReq->req.length;
401 
402 	/* don't queue twice */
403 	if (mReq->req.status == -EALREADY)
404 		return -EALREADY;
405 
406 	mReq->req.status = -EALREADY;
407 
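	/*
	 * If the gadget requested a zero-length packet and the request is an
	 * exact multiple of maxpacket, queue an extra zero-length TD after
	 * the data TD so the transfer is terminated by a short packet.
	 */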
408 	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
409 		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
410 					   &mReq->zdma);
411 		if (mReq->zptr == NULL)
412 			return -ENOMEM;
413 
414 		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
415 		mReq->zptr->next    = TD_TERMINATE;
416 		mReq->zptr->token   = TD_STATUS_ACTIVE;
417 		if (!mReq->req.no_interrupt)
418 			mReq->zptr->token   |= TD_IOC;
419 	}
420 	ret = usb_gadget_map_request(&udc->gadget, &mReq->req, mEp->dir);
421 	if (ret)
422 		return ret;
423 
424 	/*
425 	 * TD configuration
426 	 * TODO - handle requests which span several TDs
427 	 */
428 	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
429 	mReq->ptr->token    = length << ffs_nr(TD_TOTAL_BYTES);
430 	mReq->ptr->token   &= TD_TOTAL_BYTES;
431 	mReq->ptr->token   |= TD_STATUS_ACTIVE;
432 	if (mReq->zptr) {
433 		mReq->ptr->next    = mReq->zdma;
434 	} else {
435 		mReq->ptr->next    = TD_TERMINATE;
436 		if (!mReq->req.no_interrupt)
437 			mReq->ptr->token  |= TD_IOC;
438 	}
439 	mReq->ptr->page[0]  = mReq->req.dma;
440 	for (i = 1; i < 5; i++)
441 		mReq->ptr->page[i] =
442 			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
443 
444 	if (!list_empty(&mEp->qh.queue)) {
445 		struct ci13xxx_req *mReqPrev;
446 		int n = hw_ep_bit(mEp->num, mEp->dir);
447 		int tmp_stat;
448 
449 		mReqPrev = list_entry(mEp->qh.queue.prev,
450 				struct ci13xxx_req, queue);
451 		if (mReqPrev->zptr)
452 			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
453 		else
454 			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
455 		wmb();
456 		if (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
457 			goto done;
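		/*
		 * The endpoint is not being primed right now: use the ATDTW
		 * (add dTD tripwire) semaphore to sample ENDPTSTAT reliably.
		 * If the endpoint is still active, the linked TD will be
		 * picked up by the controller; otherwise fall through and
		 * prime it again below.
		 */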
458 		do {
459 			hw_write(udc, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
460 			tmp_stat = hw_read(udc, OP_ENDPTSTAT, BIT(n));
461 		} while (!hw_read(udc, OP_USBCMD, USBCMD_ATDTW));
462 		hw_write(udc, OP_USBCMD, USBCMD_ATDTW, 0);
463 		if (tmp_stat)
464 			goto done;
465 	}
466 
467 	/*  QH configuration */
468 	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
469 	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
470 	mEp->qh.ptr->cap |=  QH_ZLT;
471 
472 	wmb();   /* synchronize before ep prime */
473 
474 	ret = hw_ep_prime(udc, mEp->num, mEp->dir,
475 			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
476 done:
477 	return ret;
478 }
479 
480 /**
481  * _hardware_dequeue: handles a request at hardware level
482  * @mEp:  endpoint
483  * @mReq: request
484  *
485  * This function returns the transferred byte count or a negative error code
486  */
487 static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
488 {
489 	if (mReq->req.status != -EALREADY)
490 		return -EINVAL;
491 
492 	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
493 		return -EBUSY;
494 
495 	if (mReq->zptr) {
496 		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
497 			return -EBUSY;
498 		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
499 		mReq->zptr = NULL;
500 	}
501 
502 	mReq->req.status = 0;
503 
504 	usb_gadget_unmap_request(&mEp->udc->gadget, &mReq->req, mEp->dir);
505 
506 	mReq->req.status = mReq->ptr->token & TD_STATUS;
507 	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
508 		mReq->req.status = -1;
509 	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
510 		mReq->req.status = -1;
511 	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
512 		mReq->req.status = -1;
513 
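	/*
	 * The TD token keeps the number of bytes *not* transferred, so the
	 * actual count is the requested length minus what is left over.
	 */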
514 	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
515 	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
516 	mReq->req.actual   = mReq->req.length - mReq->req.actual;
517 	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
518 
519 	return mReq->req.actual;
520 }
521 
522 /**
523  * _ep_nuke: dequeues all endpoint requests
524  * @mEp: endpoint
525  *
526  * This function returns an error code
527  * Caller must hold lock
528  */
529 static int _ep_nuke(struct ci13xxx_ep *mEp)
530 __releases(mEp->lock)
531 __acquires(mEp->lock)
532 {
533 	if (mEp == NULL)
534 		return -EINVAL;
535 
536 	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
537 
538 	while (!list_empty(&mEp->qh.queue)) {
539 
540 		/* pop oldest request */
541 		struct ci13xxx_req *mReq = \
542 			list_entry(mEp->qh.queue.next,
543 				   struct ci13xxx_req, queue);
544 		list_del_init(&mReq->queue);
545 		mReq->req.status = -ESHUTDOWN;
546 
547 		if (mReq->req.complete != NULL) {
548 			spin_unlock(mEp->lock);
549 			mReq->req.complete(&mEp->ep, &mReq->req);
550 			spin_lock(mEp->lock);
551 		}
552 	}
553 	return 0;
554 }
555 
556 /**
557  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
558  * @gadget: gadget
559  *
560  * This function returns an error code
561  */
562 static int _gadget_stop_activity(struct usb_gadget *gadget)
563 {
564 	struct usb_ep *ep;
565 	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
566 	unsigned long flags;
567 
568 	spin_lock_irqsave(&udc->lock, flags);
569 	udc->gadget.speed = USB_SPEED_UNKNOWN;
570 	udc->remote_wakeup = 0;
571 	udc->suspended = 0;
572 	spin_unlock_irqrestore(&udc->lock, flags);
573 
574 	/* flush all endpoints */
575 	gadget_for_each_ep(ep, gadget) {
576 		usb_ep_fifo_flush(ep);
577 	}
578 	usb_ep_fifo_flush(&udc->ep0out->ep);
579 	usb_ep_fifo_flush(&udc->ep0in->ep);
580 
581 	if (udc->driver)
582 		udc->driver->disconnect(gadget);
583 
584 	/* make sure to disable all endpoints */
585 	gadget_for_each_ep(ep, gadget) {
586 		usb_ep_disable(ep);
587 	}
588 
589 	if (udc->status != NULL) {
590 		usb_ep_free_request(&udc->ep0in->ep, udc->status);
591 		udc->status = NULL;
592 	}
593 
594 	return 0;
595 }
596 
597 /******************************************************************************
598  * ISR block
599  *****************************************************************************/
600 /**
601  * isr_reset_handler: USB reset interrupt handler
602  * @udc: UDC device
603  *
604  * This function resets USB engine after a bus reset occurred
605  */
606 static void isr_reset_handler(struct ci13xxx *udc)
607 __releases(udc->lock)
608 __acquires(udc->lock)
609 {
610 	int retval;
611 
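	/*
	 * Stop any ongoing gadget activity, put the controller back into its
	 * post-reset defaults and allocate the ep0 request used for the
	 * status stages of the upcoming control transfers.
	 */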
612 	dbg_event(0xFF, "BUS RST", 0);
613 
614 	spin_unlock(&udc->lock);
615 	retval = _gadget_stop_activity(&udc->gadget);
616 	if (retval)
617 		goto done;
618 
619 	retval = hw_usb_reset(udc);
620 	if (retval)
621 		goto done;
622 
623 	udc->status = usb_ep_alloc_request(&udc->ep0in->ep, GFP_ATOMIC);
624 	if (udc->status == NULL)
625 		retval = -ENOMEM;
626 
627 done:
628 	spin_lock(&udc->lock);
629 
630 	if (retval)
631 		dev_err(udc->dev, "error: %i\n", retval);
632 }
633 
634 /**
635  * isr_get_status_complete: get_status request complete function
636  * @ep:  endpoint
637  * @req: request handled
638  *
639  * Caller must release lock
640  */
641 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
642 {
643 	if (ep == NULL || req == NULL)
644 		return;
645 
646 	kfree(req->buf);
647 	usb_ep_free_request(ep, req);
648 }
649 
650 /**
651  * isr_get_status_response: get_status request response
652  * @udc: udc struct
653  * @setup: setup request packet
654  *
655  * This function returns an error code
656  */
657 static int isr_get_status_response(struct ci13xxx *udc,
658 				   struct usb_ctrlrequest *setup)
659 __releases(mEp->lock)
660 __acquires(mEp->lock)
661 {
662 	struct ci13xxx_ep *mEp = udc->ep0in;
663 	struct usb_request *req = NULL;
664 	gfp_t gfp_flags = GFP_ATOMIC;
665 	int dir, num, retval;
666 
667 	if (mEp == NULL || setup == NULL)
668 		return -EINVAL;
669 
670 	spin_unlock(mEp->lock);
671 	req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
672 	spin_lock(mEp->lock);
673 	if (req == NULL)
674 		return -ENOMEM;
675 
676 	req->complete = isr_get_status_complete;
677 	req->length   = 2;
678 	req->buf      = kzalloc(req->length, gfp_flags);
679 	if (req->buf == NULL) {
680 		retval = -ENOMEM;
681 		goto err_free_req;
682 	}
683 
684 	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
685 		/* Assume that device is bus powered for now. */
686 		*(u16 *)req->buf = udc->remote_wakeup << 1;
687 		retval = 0;
688 	} else if ((setup->bRequestType & USB_RECIP_MASK) \
689 		   == USB_RECIP_ENDPOINT) {
690 		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
691 			TX : RX;
692 		num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
693 		*(u16 *)req->buf = hw_ep_get_halt(udc, num, dir);
694 	}
695 	/* else do nothing; reserved for future use */
696 
697 	spin_unlock(mEp->lock);
698 	retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
699 	spin_lock(mEp->lock);
700 	if (retval)
701 		goto err_free_buf;
702 
703 	return 0;
704 
705  err_free_buf:
706 	kfree(req->buf);
707  err_free_req:
708 	spin_unlock(mEp->lock);
709 	usb_ep_free_request(&mEp->ep, req);
710 	spin_lock(mEp->lock);
711 	return retval;
712 }
713 
714 /**
715  * isr_setup_status_complete: setup_status request complete function
716  * @ep:  endpoint
717  * @req: request handled
718  *
719  * Caller must release lock. Put the port in test mode if test mode
720  * feature is selected.
721  */
722 static void
723 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
724 {
725 	struct ci13xxx *udc = req->context;
726 	unsigned long flags;
727 
728 	if (udc->setaddr) {
729 		hw_usb_set_address(udc, udc->address);
730 		udc->setaddr = false;
731 	}
732 
733 	spin_lock_irqsave(&udc->lock, flags);
734 	if (udc->test_mode)
735 		hw_port_test_set(udc, udc->test_mode);
736 	spin_unlock_irqrestore(&udc->lock, flags);
737 }
738 
739 /**
740  * isr_setup_status_phase: queues the status phase of a setup transaction
741  * @udc: udc struct
742  *
743  * This function returns an error code
744  */
745 static int isr_setup_status_phase(struct ci13xxx *udc)
746 __releases(mEp->lock)
747 __acquires(mEp->lock)
748 {
749 	int retval;
750 	struct ci13xxx_ep *mEp;
751 
752 	mEp = (udc->ep0_dir == TX) ? udc->ep0out : udc->ep0in;
753 	udc->status->context = udc;
754 	udc->status->complete = isr_setup_status_complete;
755 
756 	spin_unlock(mEp->lock);
757 	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
758 	spin_lock(mEp->lock);
759 
760 	return retval;
761 }
762 
763 /**
764  * isr_tr_complete_low: transaction complete low level handler
765  * @mEp: endpoint
766  *
767  * This function returns an error code
768  * Caller must hold lock
769  */
770 static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
771 __releases(mEp->lock)
772 __acquires(mEp->lock)
773 {
774 	struct ci13xxx_req *mReq, *mReqTemp;
775 	struct ci13xxx_ep *mEpTemp = mEp;
776 	int uninitialized_var(retval);
777 
778 	if (list_empty(&mEp->qh.queue))
779 		return -EINVAL;
780 
781 	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
782 			queue) {
783 		retval = _hardware_dequeue(mEp, mReq);
784 		if (retval < 0)
785 			break;
786 		list_del_init(&mReq->queue);
787 		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
788 		if (mReq->req.complete != NULL) {
789 			spin_unlock(mEp->lock);
790 			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
791 					mReq->req.length)
792 				mEpTemp = mEp->udc->ep0in;
793 			mReq->req.complete(&mEpTemp->ep, &mReq->req);
794 			spin_lock(mEp->lock);
795 		}
796 	}
797 
798 	if (retval == -EBUSY)
799 		retval = 0;
800 	if (retval < 0)
801 		dbg_event(_usb_addr(mEp), "DONE", retval);
802 
803 	return retval;
804 }
805 
806 /**
807  * isr_tr_complete_handler: transaction complete interrupt handler
808  * @udc: UDC descriptor
809  *
810  * This function handles traffic events
811  */
812 static void isr_tr_complete_handler(struct ci13xxx *udc)
813 __releases(udc->lock)
814 __acquires(udc->lock)
815 {
816 	unsigned i;
817 	u8 tmode = 0;
818 
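	/*
	 * Walk every hardware endpoint: first retire completed TDs, then, for
	 * ep0 only, pick up any new setup packet and decode the standard
	 * requests handled by the UDC itself before delegating to the gadget.
	 */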
819 	for (i = 0; i < udc->hw_ep_max; i++) {
820 		struct ci13xxx_ep *mEp  = &udc->ci13xxx_ep[i];
821 		int type, num, dir, err = -EINVAL;
822 		struct usb_ctrlrequest req;
823 
824 		if (mEp->ep.desc == NULL)
825 			continue;   /* not configured */
826 
827 		if (hw_test_and_clear_complete(udc, i)) {
828 			err = isr_tr_complete_low(mEp);
829 			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
830 				if (err > 0)   /* needs status phase */
831 					err = isr_setup_status_phase(udc);
832 				if (err < 0) {
833 					dbg_event(_usb_addr(mEp),
834 						  "ERROR", err);
835 					spin_unlock(&udc->lock);
836 					if (usb_ep_set_halt(&mEp->ep))
837 						dev_err(udc->dev,
838 							"error: ep_set_halt\n");
839 					spin_lock(&udc->lock);
840 				}
841 			}
842 		}
843 
844 		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
845 		    !hw_test_and_clear_setup_status(udc, i))
846 			continue;
847 
848 		if (i != 0) {
849 			dev_warn(udc->dev, "ctrl traffic at endpoint %d\n", i);
850 			continue;
851 		}
852 
853 		/*
854 		 * Flush data and handshake transactions of previous
855 		 * setup packet.
856 		 */
857 		_ep_nuke(udc->ep0out);
858 		_ep_nuke(udc->ep0in);
859 
860 		/* read the setup packet atomically, guarded by the setup tripwire */
861 		do {
862 			hw_test_and_set_setup_guard(udc);
863 			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
864 		} while (!hw_test_and_clear_setup_guard(udc));
865 
866 		type = req.bRequestType;
867 
868 		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
869 
870 		dbg_setup(_usb_addr(mEp), &req);
871 
872 		switch (req.bRequest) {
873 		case USB_REQ_CLEAR_FEATURE:
874 			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
875 					le16_to_cpu(req.wValue) ==
876 					USB_ENDPOINT_HALT) {
877 				if (req.wLength != 0)
878 					break;
879 				num  = le16_to_cpu(req.wIndex);
880 				dir = num & USB_ENDPOINT_DIR_MASK;
881 				num &= USB_ENDPOINT_NUMBER_MASK;
882 				if (dir) /* TX */
883 					num += udc->hw_ep_max/2;
884 				if (!udc->ci13xxx_ep[num].wedge) {
885 					spin_unlock(&udc->lock);
886 					err = usb_ep_clear_halt(
887 						&udc->ci13xxx_ep[num].ep);
888 					spin_lock(&udc->lock);
889 					if (err)
890 						break;
891 				}
892 				err = isr_setup_status_phase(udc);
893 			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
894 					le16_to_cpu(req.wValue) ==
895 					USB_DEVICE_REMOTE_WAKEUP) {
896 				if (req.wLength != 0)
897 					break;
898 				udc->remote_wakeup = 0;
899 				err = isr_setup_status_phase(udc);
900 			} else {
901 				goto delegate;
902 			}
903 			break;
904 		case USB_REQ_GET_STATUS:
905 			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
906 			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
907 			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
908 				goto delegate;
909 			if (le16_to_cpu(req.wLength) != 2 ||
910 			    le16_to_cpu(req.wValue)  != 0)
911 				break;
912 			err = isr_get_status_response(udc, &req);
913 			break;
914 		case USB_REQ_SET_ADDRESS:
915 			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
916 				goto delegate;
917 			if (le16_to_cpu(req.wLength) != 0 ||
918 			    le16_to_cpu(req.wIndex)  != 0)
919 				break;
920 			udc->address = (u8)le16_to_cpu(req.wValue);
921 			udc->setaddr = true;
922 			err = isr_setup_status_phase(udc);
923 			break;
924 		case USB_REQ_SET_FEATURE:
925 			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
926 					le16_to_cpu(req.wValue) ==
927 					USB_ENDPOINT_HALT) {
928 				if (req.wLength != 0)
929 					break;
930 				num  = le16_to_cpu(req.wIndex);
931 				dir = num & USB_ENDPOINT_DIR_MASK;
932 				num &= USB_ENDPOINT_NUMBER_MASK;
933 				if (dir) /* TX */
934 					num += udc->hw_ep_max/2;
935 
936 				spin_unlock(&udc->lock);
937 				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
938 				spin_lock(&udc->lock);
939 				if (!err)
940 					isr_setup_status_phase(udc);
941 			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
942 				if (req.wLength != 0)
943 					break;
944 				switch (le16_to_cpu(req.wValue)) {
945 				case USB_DEVICE_REMOTE_WAKEUP:
946 					udc->remote_wakeup = 1;
947 					err = isr_setup_status_phase(udc);
948 					break;
949 				case USB_DEVICE_TEST_MODE:
950 					tmode = le16_to_cpu(req.wIndex) >> 8;
951 					switch (tmode) {
952 					case TEST_J:
953 					case TEST_K:
954 					case TEST_SE0_NAK:
955 					case TEST_PACKET:
956 					case TEST_FORCE_EN:
957 						udc->test_mode = tmode;
958 						err = isr_setup_status_phase(
959 								udc);
960 						break;
961 					default:
962 						break;
963 					}
964 				default:
965 					goto delegate;
966 				}
967 			} else {
968 				goto delegate;
969 			}
970 			break;
971 		default:
972 delegate:
973 			if (req.wLength == 0)   /* no data phase */
974 				udc->ep0_dir = TX;
975 
976 			spin_unlock(&udc->lock);
977 			err = udc->driver->setup(&udc->gadget, &req);
978 			spin_lock(&udc->lock);
979 			break;
980 		}
981 
982 		if (err < 0) {
983 			dbg_event(_usb_addr(mEp), "ERROR", err);
984 
985 			spin_unlock(&udc->lock);
986 			if (usb_ep_set_halt(&mEp->ep))
987 				dev_err(udc->dev, "error: ep_set_halt\n");
988 			spin_lock(&udc->lock);
989 		}
990 	}
991 }
992 
993 /******************************************************************************
994  * ENDPT block
995  *****************************************************************************/
996 /**
997  * ep_enable: configure endpoint, making it usable
998  *
999  * Check usb_ep_enable() at "usb_gadget.h" for details
1000  */
1001 static int ep_enable(struct usb_ep *ep,
1002 		     const struct usb_endpoint_descriptor *desc)
1003 {
1004 	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1005 	int retval = 0;
1006 	unsigned long flags;
1007 
1008 	if (ep == NULL || desc == NULL)
1009 		return -EINVAL;
1010 
1011 	spin_lock_irqsave(mEp->lock, flags);
1012 
1013 	/* only internal SW should enable ctrl endpts */
1014 
1015 	mEp->ep.desc = desc;
1016 
1017 	if (!list_empty(&mEp->qh.queue))
1018 		dev_warn(mEp->udc->dev, "enabling a non-empty endpoint!\n");
1019 
1020 	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
1021 	mEp->num  = usb_endpoint_num(desc);
1022 	mEp->type = usb_endpoint_type(desc);
1023 
1024 	mEp->ep.maxpacket = usb_endpoint_maxp(desc);
1025 
1026 	dbg_event(_usb_addr(mEp), "ENABLE", 0);
1027 
1028 	mEp->qh.ptr->cap = 0;
1029 
1030 	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1031 		mEp->qh.ptr->cap |=  QH_IOS;
1032 	else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
1033 		mEp->qh.ptr->cap &= ~QH_MULT;
1034 	else
1035 		mEp->qh.ptr->cap &= ~QH_ZLT;
1036 
1037 	mEp->qh.ptr->cap |=
1038 		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
1039 	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
1040 
1041 	/*
1042 	 * Enable endpoints in the HW other than ep0 as ep0
1043 	 * is always enabled
1044 	 */
1045 	if (mEp->num)
1046 		retval |= hw_ep_enable(mEp->udc, mEp->num, mEp->dir, mEp->type);
1047 
1048 	spin_unlock_irqrestore(mEp->lock, flags);
1049 	return retval;
1050 }
1051 
1052 /**
1053  * ep_disable: endpoint is no longer usable
1054  *
1055  * Check usb_ep_disable() at "usb_gadget.h" for details
1056  */
1057 static int ep_disable(struct usb_ep *ep)
1058 {
1059 	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1060 	int direction, retval = 0;
1061 	unsigned long flags;
1062 
1063 	if (ep == NULL)
1064 		return -EINVAL;
1065 	else if (mEp->ep.desc == NULL)
1066 		return -EBUSY;
1067 
1068 	spin_lock_irqsave(mEp->lock, flags);
1069 
1070 	/* only internal SW should disable ctrl endpts */
1071 
1072 	direction = mEp->dir;
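	/*
	 * Control endpoints are bidirectional: for them the loop runs once
	 * per direction so both halves get nuked and disabled.
	 */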
1073 	do {
1074 		dbg_event(_usb_addr(mEp), "DISABLE", 0);
1075 
1076 		retval |= _ep_nuke(mEp);
1077 		retval |= hw_ep_disable(mEp->udc, mEp->num, mEp->dir);
1078 
1079 		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1080 			mEp->dir = (mEp->dir == TX) ? RX : TX;
1081 
1082 	} while (mEp->dir != direction);
1083 
1084 	mEp->ep.desc = NULL;
1085 
1086 	spin_unlock_irqrestore(mEp->lock, flags);
1087 	return retval;
1088 }
1089 
1090 /**
1091  * ep_alloc_request: allocate a request object to use with this endpoint
1092  *
1093  * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1094  */
1095 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1096 {
1097 	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
1098 	struct ci13xxx_req *mReq = NULL;
1099 
1100 	if (ep == NULL)
1101 		return NULL;
1102 
1103 	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
1104 	if (mReq != NULL) {
1105 		INIT_LIST_HEAD(&mReq->queue);
1106 
1107 		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
1108 					   &mReq->dma);
1109 		if (mReq->ptr == NULL) {
1110 			kfree(mReq);
1111 			mReq = NULL;
1112 		}
1113 	}
1114 
1115 	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
1116 
1117 	return (mReq == NULL) ? NULL : &mReq->req;
1118 }
1119 
1120 /**
1121  * ep_free_request: frees a request object
1122  *
1123  * Check usb_ep_free_request() at "usb_gadget.h" for details
1124  */
1125 static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1126 {
1127 	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
1128 	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1129 	unsigned long flags;
1130 
1131 	if (ep == NULL || req == NULL) {
1132 		return;
1133 	} else if (!list_empty(&mReq->queue)) {
1134 		dev_err(mEp->udc->dev, "freeing queued request\n");
1135 		return;
1136 	}
1137 
1138 	spin_lock_irqsave(mEp->lock, flags);
1139 
1140 	if (mReq->ptr)
1141 		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
1142 	kfree(mReq);
1143 
1144 	dbg_event(_usb_addr(mEp), "FREE", 0);
1145 
1146 	spin_unlock_irqrestore(mEp->lock, flags);
1147 }
1148 
1149 /**
1150  * ep_queue: queues (submits) an I/O request to an endpoint
1151  *
1152  * Check usb_ep_queue() at "usb_gadget.h" for details
1153  */
1154 static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1155 		    gfp_t __maybe_unused gfp_flags)
1156 {
1157 	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
1158 	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1159 	struct ci13xxx *udc = mEp->udc;
1160 	int retval = 0;
1161 	unsigned long flags;
1162 
1163 	if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
1164 		return -EINVAL;
1165 
1166 	spin_lock_irqsave(mEp->lock, flags);
1167 
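	/*
	 * Control requests with a data stage are redirected to the ep0
	 * endpoint that matches the current data-phase direction.
	 */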
1168 	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
1169 		if (req->length)
1170 			mEp = (udc->ep0_dir == RX) ?
1171 			       udc->ep0out : udc->ep0in;
1172 		if (!list_empty(&mEp->qh.queue)) {
1173 			_ep_nuke(mEp);
1174 			retval = -EOVERFLOW;
1175 			dev_warn(mEp->udc->dev, "endpoint ctrl %X nuked\n",
1176 				 _usb_addr(mEp));
1177 		}
1178 	}
1179 
1180 	/* first nuke then test link, e.g. previous status has not been sent */
1181 	if (!list_empty(&mReq->queue)) {
1182 		retval = -EBUSY;
1183 		dev_err(mEp->udc->dev, "request already in queue\n");
1184 		goto done;
1185 	}
1186 
1187 	if (req->length > 4 * CI13XXX_PAGE_SIZE) {
1188 		req->length = 4 * CI13XXX_PAGE_SIZE;
1189 		retval = -EMSGSIZE;
1190 		dev_warn(mEp->udc->dev, "request length truncated\n");
1191 	}
1192 
1193 	dbg_queue(_usb_addr(mEp), req, retval);
1194 
1195 	/* push request */
1196 	mReq->req.status = -EINPROGRESS;
1197 	mReq->req.actual = 0;
1198 
1199 	retval = _hardware_enqueue(mEp, mReq);
1200 
1201 	if (retval == -EALREADY) {
1202 		dbg_event(_usb_addr(mEp), "QUEUE", retval);
1203 		retval = 0;
1204 	}
1205 	if (!retval)
1206 		list_add_tail(&mReq->queue, &mEp->qh.queue);
1207 
1208  done:
1209 	spin_unlock_irqrestore(mEp->lock, flags);
1210 	return retval;
1211 }
1212 
1213 /**
1214  * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1215  *
1216  * Check usb_ep_dequeue() at "usb_gadget.h" for details
1217  */
1218 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1219 {
1220 	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
1221 	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1222 	unsigned long flags;
1223 
1224 	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
1225 		mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
1226 		list_empty(&mEp->qh.queue))
1227 		return -EINVAL;
1228 
1229 	spin_lock_irqsave(mEp->lock, flags);
1230 
1231 	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
1232 
1233 	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
1234 
1235 	/* pop request */
1236 	list_del_init(&mReq->queue);
1237 
1238 	usb_gadget_unmap_request(&mEp->udc->gadget, req, mEp->dir);
1239 
1240 	req->status = -ECONNRESET;
1241 
1242 	if (mReq->req.complete != NULL) {
1243 		spin_unlock(mEp->lock);
1244 		mReq->req.complete(&mEp->ep, &mReq->req);
1245 		spin_lock(mEp->lock);
1246 	}
1247 
1248 	spin_unlock_irqrestore(mEp->lock, flags);
1249 	return 0;
1250 }
1251 
1252 /**
1253  * ep_set_halt: sets the endpoint halt feature
1254  *
1255  * Check usb_ep_set_halt() at "usb_gadget.h" for details
1256  */
1257 static int ep_set_halt(struct usb_ep *ep, int value)
1258 {
1259 	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1260 	int direction, retval = 0;
1261 	unsigned long flags;
1262 
1263 	if (ep == NULL || mEp->ep.desc == NULL)
1264 		return -EINVAL;
1265 
1266 	spin_lock_irqsave(mEp->lock, flags);
1267 
1268 #ifndef STALL_IN
1269 	/* g_file_storage is MS-compliant, but g_zero fails chapter 9 compliance */
1270 	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
1271 	    !list_empty(&mEp->qh.queue)) {
1272 		spin_unlock_irqrestore(mEp->lock, flags);
1273 		return -EAGAIN;
1274 	}
1275 #endif
1276 
1277 	direction = mEp->dir;
1278 	do {
1279 		dbg_event(_usb_addr(mEp), "HALT", value);
1280 		retval |= hw_ep_set_halt(mEp->udc, mEp->num, mEp->dir, value);
1281 
1282 		if (!value)
1283 			mEp->wedge = 0;
1284 
1285 		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1286 			mEp->dir = (mEp->dir == TX) ? RX : TX;
1287 
1288 	} while (mEp->dir != direction);
1289 
1290 	spin_unlock_irqrestore(mEp->lock, flags);
1291 	return retval;
1292 }
1293 
1294 /**
1295  * ep_set_wedge: sets the halt feature and ignores clear requests
1296  *
1297  * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1298  */
1299 static int ep_set_wedge(struct usb_ep *ep)
1300 {
1301 	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1302 	unsigned long flags;
1303 
1304 	if (ep == NULL || mEp->ep.desc == NULL)
1305 		return -EINVAL;
1306 
1307 	spin_lock_irqsave(mEp->lock, flags);
1308 
1309 	dbg_event(_usb_addr(mEp), "WEDGE", 0);
1310 	mEp->wedge = 1;
1311 
1312 	spin_unlock_irqrestore(mEp->lock, flags);
1313 
1314 	return usb_ep_set_halt(ep);
1315 }
1316 
1317 /**
1318  * ep_fifo_flush: flushes contents of a fifo
1319  *
1320  * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1321  */
1322 static void ep_fifo_flush(struct usb_ep *ep)
1323 {
1324 	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1325 	unsigned long flags;
1326 
1327 	if (ep == NULL) {
1328 		pr_err("%s: invalid endpoint\n", __func__);
1329 		return;
1330 	}
1331 
1332 	spin_lock_irqsave(mEp->lock, flags);
1333 
1334 	dbg_event(_usb_addr(mEp), "FFLUSH", 0);
1335 	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
1336 
1337 	spin_unlock_irqrestore(mEp->lock, flags);
1338 }
1339 
1340 /**
1341  * Endpoint-specific part of the API to the USB controller hardware
1342  * Check "usb_gadget.h" for details
1343  */
1344 static const struct usb_ep_ops usb_ep_ops = {
1345 	.enable	       = ep_enable,
1346 	.disable       = ep_disable,
1347 	.alloc_request = ep_alloc_request,
1348 	.free_request  = ep_free_request,
1349 	.queue	       = ep_queue,
1350 	.dequeue       = ep_dequeue,
1351 	.set_halt      = ep_set_halt,
1352 	.set_wedge     = ep_set_wedge,
1353 	.fifo_flush    = ep_fifo_flush,
1354 };
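/*
 * Rough usage sketch (illustrative only; buf/len/done_fn are placeholders):
 * a gadget driver reaches these ops through the generic gadget API, e.g.
 *
 *	usb_ep_enable(ep);                          -> ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL); -> ep_alloc_request()
 *	req->buf = buf; req->length = len; req->complete = done_fn;
 *	usb_ep_queue(ep, req, GFP_KERNEL);          -> ep_queue()
 *	usb_ep_dequeue(ep, req);                    -> ep_dequeue()
 */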
1355 
1356 /******************************************************************************
1357  * GADGET block
1358  *****************************************************************************/
1359 static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
1360 {
1361 	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1362 	unsigned long flags;
1363 	int gadget_ready = 0;
1364 
1365 	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
1366 		return -EOPNOTSUPP;
1367 
1368 	spin_lock_irqsave(&udc->lock, flags);
1369 	udc->vbus_active = is_active;
1370 	if (udc->driver)
1371 		gadget_ready = 1;
1372 	spin_unlock_irqrestore(&udc->lock, flags);
1373 
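	/*
	 * With a gadget driver bound, VBUS appearing powers up and connects
	 * the controller; VBUS going away stops it and reports the
	 * disconnect to the gadget driver.
	 */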
1374 	if (gadget_ready) {
1375 		if (is_active) {
1376 			pm_runtime_get_sync(&_gadget->dev);
1377 			hw_device_reset(udc, USBMODE_CM_DC);
1378 			hw_device_state(udc, udc->ep0out->qh.dma);
1379 		} else {
1380 			hw_device_state(udc, 0);
1381 			if (udc->udc_driver->notify_event)
1382 				udc->udc_driver->notify_event(udc,
1383 				CI13XXX_CONTROLLER_STOPPED_EVENT);
1384 			_gadget_stop_activity(&udc->gadget);
1385 			pm_runtime_put_sync(&_gadget->dev);
1386 		}
1387 	}
1388 
1389 	return 0;
1390 }
1391 
1392 static int ci13xxx_wakeup(struct usb_gadget *_gadget)
1393 {
1394 	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1395 	unsigned long flags;
1396 	int ret = 0;
1397 
1398 	spin_lock_irqsave(&udc->lock, flags);
1399 	if (!udc->remote_wakeup) {
1400 		ret = -EOPNOTSUPP;
1401 		goto out;
1402 	}
1403 	if (!hw_read(udc, OP_PORTSC, PORTSC_SUSP)) {
1404 		ret = -EINVAL;
1405 		goto out;
1406 	}
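	/* trigger the remote wakeup by forcing port resume (FPR) */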
1407 	hw_write(udc, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1408 out:
1409 	spin_unlock_irqrestore(&udc->lock, flags);
1410 	return ret;
1411 }
1412 
1413 static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1414 {
1415 	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1416 
1417 	if (udc->transceiver)
1418 		return usb_phy_set_power(udc->transceiver, mA);
1419 	return -ENOTSUPP;
1420 }
1421 
1422 static int ci13xxx_start(struct usb_gadget *gadget,
1423 			 struct usb_gadget_driver *driver);
1424 static int ci13xxx_stop(struct usb_gadget *gadget,
1425 			struct usb_gadget_driver *driver);
1426 /**
1427  * Device operations part of the API to the USB controller hardware,
1428  * which don't involve endpoints (or i/o)
1429  * Check  "usb_gadget.h" for details
1430  */
1431 static const struct usb_gadget_ops usb_gadget_ops = {
1432 	.vbus_session	= ci13xxx_vbus_session,
1433 	.wakeup		= ci13xxx_wakeup,
1434 	.vbus_draw	= ci13xxx_vbus_draw,
1435 	.udc_start	= ci13xxx_start,
1436 	.udc_stop	= ci13xxx_stop,
1437 };
1438 
1439 static int init_eps(struct ci13xxx *udc)
1440 {
1441 	int retval = 0, i, j;
1442 
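	/*
	 * The ci13xxx_ep array holds all OUT (RX) endpoints first, followed
	 * by all IN (TX) endpoints, which is the layout ep_to_bit() relies on.
	 */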
1443 	for (i = 0; i < udc->hw_ep_max/2; i++)
1444 		for (j = RX; j <= TX; j++) {
1445 			int k = i + j * udc->hw_ep_max/2;
1446 			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
1447 
1448 			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
1449 					(j == TX)  ? "in" : "out");
1450 
1451 			mEp->udc          = udc;
1452 			mEp->lock         = &udc->lock;
1453 			mEp->td_pool      = udc->td_pool;
1454 
1455 			mEp->ep.name      = mEp->name;
1456 			mEp->ep.ops       = &usb_ep_ops;
1457 			mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
1458 
1459 			INIT_LIST_HEAD(&mEp->qh.queue);
1460 			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
1461 						     &mEp->qh.dma);
1462 			if (mEp->qh.ptr == NULL)
1463 				retval = -ENOMEM;
1464 			else
1465 				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
1466 
1467 			/*
1468 			 * set up shorthands for ep0 out and in endpoints,
1469 			 * don't add to gadget's ep_list
1470 			 */
1471 			if (i == 0) {
1472 				if (j == RX)
1473 					udc->ep0out = mEp;
1474 				else
1475 					udc->ep0in = mEp;
1476 
1477 				continue;
1478 			}
1479 
1480 			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
1481 		}
1482 
1483 	return retval;
1484 }
1485 
1486 /**
1487  * ci13xxx_start: register a gadget driver
1488  * @gadget: our gadget
1489  * @driver: the driver being registered
1490  *
1491  * Interrupts are enabled here.
1492  */
1493 static int ci13xxx_start(struct usb_gadget *gadget,
1494 			 struct usb_gadget_driver *driver)
1495 {
1496 	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
1497 	unsigned long flags;
1498 	int retval = -ENOMEM;
1499 
1500 	if (driver->disconnect == NULL)
1501 		return -EINVAL;
1502 
1503 
1504 	udc->ep0out->ep.desc = &ctrl_endpt_out_desc;
1505 	retval = usb_ep_enable(&udc->ep0out->ep);
1506 	if (retval)
1507 		return retval;
1508 
1509 	udc->ep0in->ep.desc = &ctrl_endpt_in_desc;
1510 	retval = usb_ep_enable(&udc->ep0in->ep);
1511 	if (retval)
1512 		return retval;
1513 	spin_lock_irqsave(&udc->lock, flags);
1514 
1515 	udc->driver = driver;
1516 	pm_runtime_get_sync(&udc->gadget.dev);
1517 	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
1518 		if (udc->vbus_active) {
1519 			if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
1520 				hw_device_reset(udc, USBMODE_CM_DC);
1521 		} else {
1522 			pm_runtime_put_sync(&udc->gadget.dev);
1523 			goto done;
1524 		}
1525 	}
1526 
1527 	retval = hw_device_state(udc, udc->ep0out->qh.dma);
1528 	if (retval)
1529 		pm_runtime_put_sync(&udc->gadget.dev);
1530 
1531  done:
1532 	spin_unlock_irqrestore(&udc->lock, flags);
1533 	return retval;
1534 }
1535 
1536 /**
1537  * ci13xxx_stop: unregister a gadget driver
1538  */
1539 static int ci13xxx_stop(struct usb_gadget *gadget,
1540 			struct usb_gadget_driver *driver)
1541 {
1542 	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
1543 	unsigned long flags;
1544 
1545 	spin_lock_irqsave(&udc->lock, flags);
1546 
1547 	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
1548 			udc->vbus_active) {
1549 		hw_device_state(udc, 0);
1550 		if (udc->udc_driver->notify_event)
1551 			udc->udc_driver->notify_event(udc,
1552 			CI13XXX_CONTROLLER_STOPPED_EVENT);
1553 		udc->driver = NULL;
1554 		spin_unlock_irqrestore(&udc->lock, flags);
1555 		_gadget_stop_activity(&udc->gadget);
1556 		spin_lock_irqsave(&udc->lock, flags);
1557 		pm_runtime_put(&udc->gadget.dev);
1558 	}
1559 
1560 	spin_unlock_irqrestore(&udc->lock, flags);
1561 
1562 	return 0;
1563 }
1564 
1565 /******************************************************************************
1566  * BUS block
1567  *****************************************************************************/
1568 /**
1569  * udc_irq: udc interrupt handler
1570  *
1571  * This function returns IRQ_HANDLED if the IRQ has been handled
1572  * It locks access to registers
1573  */
1574 static irqreturn_t udc_irq(struct ci13xxx *udc)
1575 {
1576 	irqreturn_t retval;
1577 	u32 intr;
1578 
1579 	if (udc == NULL)
1580 		return IRQ_HANDLED;
1581 
1582 	spin_lock(&udc->lock);
1583 
1584 	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
1585 		if (hw_read(udc, OP_USBMODE, USBMODE_CM) !=
1586 				USBMODE_CM_DC) {
1587 			spin_unlock(&udc->lock);
1588 			return IRQ_NONE;
1589 		}
1590 	}
1591 	intr = hw_test_and_clear_intr_active(udc);
1592 	dbg_interrupt(intr);
1593 
1594 	if (intr) {
1595 		/* order defines priority - do NOT change it */
1596 		if (USBi_URI & intr)
1597 			isr_reset_handler(udc);
1598 
1599 		if (USBi_PCI & intr) {
1600 			udc->gadget.speed = hw_port_is_high_speed(udc) ?
1601 				USB_SPEED_HIGH : USB_SPEED_FULL;
1602 			if (udc->suspended && udc->driver->resume) {
1603 				spin_unlock(&udc->lock);
1604 				udc->driver->resume(&udc->gadget);
1605 				spin_lock(&udc->lock);
1606 				udc->suspended = 0;
1607 			}
1608 		}
1609 
1610 		if (USBi_UI  & intr)
1611 			isr_tr_complete_handler(udc);
1612 
1613 		if (USBi_SLI & intr) {
1614 			if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
1615 			    udc->driver->suspend) {
1616 				udc->suspended = 1;
1617 				spin_unlock(&udc->lock);
1618 				udc->driver->suspend(&udc->gadget);
1619 				spin_lock(&udc->lock);
1620 			}
1621 		}
1622 		retval = IRQ_HANDLED;
1623 	} else {
1624 		retval = IRQ_NONE;
1625 	}
1626 	spin_unlock(&udc->lock);
1627 
1628 	return retval;
1629 }
1630 
1631 /**
1632  * udc_release: driver release function
1633  * @dev: device
1634  *
1635  * Currently does nothing
1636  */
1637 static void udc_release(struct device *dev)
1638 {
1639 }
1640 
1641 /**
1642  * udc_start: initialize gadget role
1643  * @udc: chipidea controller
1644  */
1645 static int udc_start(struct ci13xxx *udc)
1646 {
1647 	struct device *dev = udc->dev;
1648 	int retval = 0;
1649 
1650 	if (!udc)
1651 		return -EINVAL;
1652 
1653 	spin_lock_init(&udc->lock);
1654 
1655 	udc->gadget.ops          = &usb_gadget_ops;
1656 	udc->gadget.speed        = USB_SPEED_UNKNOWN;
1657 	udc->gadget.max_speed    = USB_SPEED_HIGH;
1658 	udc->gadget.is_otg       = 0;
1659 	udc->gadget.name         = udc->udc_driver->name;
1660 
1661 	INIT_LIST_HEAD(&udc->gadget.ep_list);
1662 
1663 	dev_set_name(&udc->gadget.dev, "gadget");
1664 	udc->gadget.dev.dma_mask = dev->dma_mask;
1665 	udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
1666 	udc->gadget.dev.parent   = dev;
1667 	udc->gadget.dev.release  = udc_release;
1668 
1669 	/* alloc resources */
1670 	udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
1671 				       sizeof(struct ci13xxx_qh),
1672 				       64, CI13XXX_PAGE_SIZE);
1673 	if (udc->qh_pool == NULL)
1674 		return -ENOMEM;
1675 
1676 	udc->td_pool = dma_pool_create("ci13xxx_td", dev,
1677 				       sizeof(struct ci13xxx_td),
1678 				       64, CI13XXX_PAGE_SIZE);
1679 	if (udc->td_pool == NULL) {
1680 		retval = -ENOMEM;
1681 		goto free_qh_pool;
1682 	}
1683 
1684 	retval = init_eps(udc);
1685 	if (retval)
1686 		goto free_pools;
1687 
1688 	udc->gadget.ep0 = &udc->ep0in->ep;
1689 
1690 	udc->transceiver = usb_get_transceiver();
1691 
1692 	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
1693 		if (udc->transceiver == NULL) {
1694 			retval = -ENODEV;
1695 			goto free_pools;
1696 		}
1697 	}
1698 
1699 	if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
1700 		retval = hw_device_reset(udc, USBMODE_CM_DC);
1701 		if (retval)
1702 			goto put_transceiver;
1703 	}
1704 
1705 	retval = device_register(&udc->gadget.dev);
1706 	if (retval) {
1707 		put_device(&udc->gadget.dev);
1708 		goto put_transceiver;
1709 	}
1710 
1711 	retval = dbg_create_files(&udc->gadget.dev);
1712 	if (retval)
1713 		goto unreg_device;
1714 
1715 	if (udc->transceiver) {
1716 		retval = otg_set_peripheral(udc->transceiver->otg,
1717 						&udc->gadget);
1718 		if (retval)
1719 			goto remove_dbg;
1720 	}
1721 
1722 	retval = usb_add_gadget_udc(dev, &udc->gadget);
1723 	if (retval)
1724 		goto remove_trans;
1725 
1726 	pm_runtime_no_callbacks(&udc->gadget.dev);
1727 	pm_runtime_enable(&udc->gadget.dev);
1728 
1729 	return retval;
1730 
1731 remove_trans:
1732 	if (udc->transceiver) {
1733 		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
1734 		usb_put_transceiver(udc->transceiver);
1735 	}
1736 
1737 	dev_err(dev, "error = %i\n", retval);
1738 remove_dbg:
1739 	dbg_remove_files(&udc->gadget.dev);
1740 unreg_device:
1741 	device_unregister(&udc->gadget.dev);
1742 put_transceiver:
1743 	if (udc->transceiver)
1744 		usb_put_transceiver(udc->transceiver);
1745 free_pools:
1746 	dma_pool_destroy(udc->td_pool);
1747 free_qh_pool:
1748 	dma_pool_destroy(udc->qh_pool);
1749 	return retval;
1750 }
1751 
1752 /**
1753  * udc_stop: parent remove must call this to remove the UDC
1754  *
1755  * No interrupts active, the IRQ has been released
1756  */
1757 static void udc_stop(struct ci13xxx *udc)
1758 {
1759 	int i;
1760 
1761 	if (udc == NULL)
1762 		return;
1763 
1764 	usb_del_gadget_udc(&udc->gadget);
1765 
1766 	for (i = 0; i < udc->hw_ep_max; i++) {
1767 		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
1768 
1769 		dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
1770 	}
1771 
1772 	dma_pool_destroy(udc->td_pool);
1773 	dma_pool_destroy(udc->qh_pool);
1774 
1775 	if (udc->transceiver) {
1776 		otg_set_peripheral(udc->transceiver->otg, NULL);
1777 		usb_put_transceiver(udc->transceiver);
1778 	}
1779 	dbg_remove_files(&udc->gadget.dev);
1780 	device_unregister(&udc->gadget.dev);
1781 	/* my kobject is dynamic, I swear! */
1782 	memset(&udc->gadget, 0, sizeof(udc->gadget));
1783 }
1784 
1785 /**
1786  * ci_hdrc_gadget_init - initialize device-related bits
1787  * @ci: the controller
1788  *
1789  * This function enables the gadget role, if the device is "device capable".
1790  */
1791 int ci_hdrc_gadget_init(struct ci13xxx *ci)
1792 {
1793 	struct ci_role_driver *rdrv;
1794 
1795 	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
1796 		return -ENXIO;
1797 
1798 	rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
1799 	if (!rdrv)
1800 		return -ENOMEM;
1801 
1802 	rdrv->start	= udc_start;
1803 	rdrv->stop	= udc_stop;
1804 	rdrv->irq	= udc_irq;
1805 	rdrv->name	= "gadget";
1806 	ci->roles[CI_ROLE_GADGET] = rdrv;
1807 
1808 	return 0;
1809 }
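/*
 * Usage sketch (assuming the usual chipidea core flow): the core probe code
 * calls ci_hdrc_gadget_init() to register this role; role start/stop then go
 * through udc_start()/udc_stop(), and the core's shared interrupt handler
 * forwards device-mode interrupts to udc_irq().
 */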
1810