xref: /linux/drivers/usb/gadget/udc/pch_udc.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
1 /*
2  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; version 2 of the License.
7  */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/interrupt.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/gadget.h>
18 #include <linux/gpio.h>
19 #include <linux/irq.h>
20 
21 /* GPIO port for VBUS detecting */
22 static int vbus_gpio_port = -1;		/* GPIO port number (-1:Not used) */
23 
24 #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
25 #define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
26 
27 /* Address offset of Registers */
28 #define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
29 
30 #define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
31 #define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
32 #define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
33 #define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
34 #define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
35 #define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
36 #define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
37 
38 #define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
39 #define UDC_DEVCTL_ADDR		0x404	/* Device control */
40 #define UDC_DEVSTS_ADDR		0x408	/* Device status */
41 #define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
42 #define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
43 #define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
44 #define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
45 #define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
46 #define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
47 #define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
48 #define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
49 
50 /* Endpoint control register */
51 /* Bit position */
52 #define UDC_EPCTL_MRXFLUSH		(1 << 12)
53 #define UDC_EPCTL_RRDY			(1 << 9)
54 #define UDC_EPCTL_CNAK			(1 << 8)
55 #define UDC_EPCTL_SNAK			(1 << 7)
56 #define UDC_EPCTL_NAK			(1 << 6)
57 #define UDC_EPCTL_P			(1 << 3)
58 #define UDC_EPCTL_F			(1 << 1)
59 #define UDC_EPCTL_S			(1 << 0)
60 #define UDC_EPCTL_ET_SHIFT		4
/* Mask pattern */
62 #define UDC_EPCTL_ET_MASK		0x00000030
63 /* Value for ET field */
64 #define UDC_EPCTL_ET_CONTROL		0
65 #define UDC_EPCTL_ET_ISO		1
66 #define UDC_EPCTL_ET_BULK		2
67 #define UDC_EPCTL_ET_INTERRUPT		3
68 
69 /* Endpoint status register */
70 /* Bit position */
71 #define UDC_EPSTS_XFERDONE		(1 << 27)
72 #define UDC_EPSTS_RSS			(1 << 26)
73 #define UDC_EPSTS_RCS			(1 << 25)
74 #define UDC_EPSTS_TXEMPTY		(1 << 24)
75 #define UDC_EPSTS_TDC			(1 << 10)
76 #define UDC_EPSTS_HE			(1 << 9)
77 #define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
78 #define UDC_EPSTS_BNA			(1 << 7)
79 #define UDC_EPSTS_IN			(1 << 6)
80 #define UDC_EPSTS_OUT_SHIFT		4
/* Mask pattern */
82 #define UDC_EPSTS_OUT_MASK		0x00000030
83 #define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
84 /* Value for OUT field */
85 #define UDC_EPSTS_OUT_SETUP		2
86 #define UDC_EPSTS_OUT_DATA		1
87 
88 /* Device configuration register */
89 /* Bit position */
90 #define UDC_DEVCFG_CSR_PRG		(1 << 17)
91 #define UDC_DEVCFG_SP			(1 << 3)
/* SPD Value */
93 #define UDC_DEVCFG_SPD_HS		0x0
94 #define UDC_DEVCFG_SPD_FS		0x1
95 #define UDC_DEVCFG_SPD_LS		0x2
96 
97 /* Device control register */
98 /* Bit position */
99 #define UDC_DEVCTL_THLEN_SHIFT		24
100 #define UDC_DEVCTL_BRLEN_SHIFT		16
101 #define UDC_DEVCTL_CSR_DONE		(1 << 13)
102 #define UDC_DEVCTL_SD			(1 << 10)
103 #define UDC_DEVCTL_MODE			(1 << 9)
104 #define UDC_DEVCTL_BREN			(1 << 8)
105 #define UDC_DEVCTL_THE			(1 << 7)
106 #define UDC_DEVCTL_DU			(1 << 4)
107 #define UDC_DEVCTL_TDE			(1 << 3)
108 #define UDC_DEVCTL_RDE			(1 << 2)
109 #define UDC_DEVCTL_RES			(1 << 0)
110 
111 /* Device status register */
112 /* Bit position */
113 #define UDC_DEVSTS_TS_SHIFT		18
114 #define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
115 #define UDC_DEVSTS_ALT_SHIFT		8
116 #define UDC_DEVSTS_INTF_SHIFT		4
117 #define UDC_DEVSTS_CFG_SHIFT		0
/* Mask pattern */
119 #define UDC_DEVSTS_TS_MASK		0xfffc0000
120 #define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
121 #define UDC_DEVSTS_ALT_MASK		0x00000f00
122 #define UDC_DEVSTS_INTF_MASK		0x000000f0
123 #define UDC_DEVSTS_CFG_MASK		0x0000000f
124 /* value for maximum speed for SPEED field */
125 #define UDC_DEVSTS_ENUM_SPEED_FULL	1
126 #define UDC_DEVSTS_ENUM_SPEED_HIGH	0
127 #define UDC_DEVSTS_ENUM_SPEED_LOW	2
128 #define UDC_DEVSTS_ENUM_SPEED_FULLX	3
129 
130 /* Device irq register */
131 /* Bit position */
132 #define UDC_DEVINT_RWKP			(1 << 7)
133 #define UDC_DEVINT_ENUM			(1 << 6)
134 #define UDC_DEVINT_SOF			(1 << 5)
135 #define UDC_DEVINT_US			(1 << 4)
136 #define UDC_DEVINT_UR			(1 << 3)
137 #define UDC_DEVINT_ES			(1 << 2)
138 #define UDC_DEVINT_SI			(1 << 1)
139 #define UDC_DEVINT_SC			(1 << 0)
/* Mask pattern */
141 #define UDC_DEVINT_MSK			0x7f
142 
143 /* Endpoint irq register */
144 /* Bit position */
145 #define UDC_EPINT_IN_SHIFT		0
146 #define UDC_EPINT_OUT_SHIFT		16
147 #define UDC_EPINT_IN_EP0		(1 << 0)
148 #define UDC_EPINT_OUT_EP0		(1 << 16)
/* Mask pattern */
150 #define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
151 
152 /* UDC_CSR_BUSY Status register */
153 /* Bit position */
154 #define UDC_CSR_BUSY			(1 << 0)
155 
156 /* SOFT RESET register */
157 /* Bit position */
158 #define UDC_PSRST			(1 << 1)
159 #define UDC_SRST			(1 << 0)
160 
161 /* USB_DEVICE endpoint register */
162 /* Bit position */
163 #define UDC_CSR_NE_NUM_SHIFT		0
164 #define UDC_CSR_NE_DIR_SHIFT		4
165 #define UDC_CSR_NE_TYPE_SHIFT		5
166 #define UDC_CSR_NE_CFG_SHIFT		7
167 #define UDC_CSR_NE_INTF_SHIFT		11
168 #define UDC_CSR_NE_ALT_SHIFT		15
169 #define UDC_CSR_NE_MAX_PKT_SHIFT	19
/* Mask pattern */
171 #define UDC_CSR_NE_NUM_MASK		0x0000000f
172 #define UDC_CSR_NE_DIR_MASK		0x00000010
173 #define UDC_CSR_NE_TYPE_MASK		0x00000060
174 #define UDC_CSR_NE_CFG_MASK		0x00000780
175 #define UDC_CSR_NE_INTF_MASK		0x00007800
176 #define UDC_CSR_NE_ALT_MASK		0x00078000
177 #define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
178 
179 #define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
180 #define PCH_UDC_EPINT(in, num)\
181 		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
182 
183 /* Index of endpoint */
184 #define UDC_EP0IN_IDX		0
185 #define UDC_EP0OUT_IDX		1
186 #define UDC_EPIN_IDX(ep)	(ep * 2)
187 #define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
188 #define PCH_UDC_EP0		0
189 #define PCH_UDC_EP1		1
190 #define PCH_UDC_EP2		2
191 #define PCH_UDC_EP3		3
192 
193 /* Number of endpoint */
194 #define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN,16 OUT) */
195 #define PCH_UDC_USED_EP_NUM	4	/* EP number of EP's really used */
196 /* Length Value */
197 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
198 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
199 /* Value of EP Buffer Size */
200 #define UDC_EP0IN_BUFF_SIZE	16
201 #define UDC_EPIN_BUFF_SIZE	256
202 #define UDC_EP0OUT_BUFF_SIZE	16
203 #define UDC_EPOUT_BUFF_SIZE	256
204 /* Value of EP maximum packet size */
205 #define UDC_EP0IN_MAX_PKT_SIZE	64
206 #define UDC_EP0OUT_MAX_PKT_SIZE	64
207 #define UDC_BULK_MAX_PKT_SIZE	512
208 
209 /* DMA */
210 #define DMA_DIR_RX		1	/* DMA for data receive */
211 #define DMA_DIR_TX		2	/* DMA for data transmit */
212 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
213 #define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
214 
/**
 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
 *				  for data
 * @status:	Status quadlet (buffer/Rx-Tx status and byte count; see the
 *		PCH_UDC_BUFF_STS / PCH_UDC_RXTX_STS / PCH_UDC_RXTX_BYTES
 *		masks below)
 * @reserved:	Reserved
 * @dataptr:	Buffer descriptor (DMA address of the data buffer)
 * @next:	Next descriptor (DMA address of the next descriptor in a chain)
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};
229 
230 /**
231  * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
232  *				 for control data
233  * @status:	Status
234  * @reserved:	Reserved
235  * @data12:	First setup word
236  * @data34:	Second setup word
237  */
238 struct pch_udc_stp_dma_desc {
239 	u32 status;
240 	u32 reserved;
241 	struct usb_ctrlrequest request;
242 } __attribute((packed));
243 
244 /* DMA status definitions */
245 /* Buffer status */
246 #define PCH_UDC_BUFF_STS	0xC0000000
247 #define PCH_UDC_BS_HST_RDY	0x00000000
248 #define PCH_UDC_BS_DMA_BSY	0x40000000
249 #define PCH_UDC_BS_DMA_DONE	0x80000000
250 #define PCH_UDC_BS_HST_BSY	0xC0000000
251 /*  Rx/Tx Status */
252 #define PCH_UDC_RXTX_STS	0x30000000
253 #define PCH_UDC_RTS_SUCC	0x00000000
254 #define PCH_UDC_RTS_DESERR	0x10000000
255 #define PCH_UDC_RTS_BUFERR	0x30000000
256 /* Last Descriptor Indication */
257 #define PCH_UDC_DMA_LAST	0x08000000
258 /* Number of Rx/Tx Bytes Mask */
259 #define PCH_UDC_RXTX_BYTES	0x0000ffff
260 
/**
 * struct pch_udc_cfg_data - Structure to hold current configuration
 *			     and interface information
 * @cur_cfg:	current configuration in use
 * @cur_intf:	current interface in use
 * @cur_alt:	current alternate interface setting in use
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};
273 
/**
 * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
 * @ep:			embedded usb_ep (gadget-layer endpoint)
 * @td_stp_phys:	DMA address of the setup-request descriptor
 * @td_data_phys:	DMA address of the data-request descriptor
 * @td_stp:		CPU pointer to the setup-request descriptor
 * @td_data:		CPU pointer to the data-request descriptor
 * @dev:		reference to the owning device struct
 * @offset_addr:	offset of this endpoint's register bank from base_addr
 * @queue:		queue of pending pch_udc_request for this endpoint
 * @num:		endpoint number
 * @in:			non-zero if this is an IN endpoint
 * @halted:		endpoint halted?
 * @epsts:		last latched endpoint status
 */
struct pch_udc_ep {
	struct usb_ep			ep;
	dma_addr_t			td_stp_phys;
	dma_addr_t			td_data_phys;
	struct pch_udc_stp_dma_desc	*td_stp;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_dev		*dev;
	unsigned long			offset_addr;
	struct list_head		queue;
	unsigned			num:5,
					in:1,
					halted:1;
	unsigned long			epsts;
};
304 
/**
 * struct pch_vbus_gpio_data - Structure holding GPIO information
 *					for detecting VBUS
 * @port:		gpio port number
 * @intr:		gpio interrupt number
 * @irq_work_fall:	work item scheduled on VBUS falling edge
 * @irq_work_rise:	work item scheduled on VBUS rising edge
 */
struct pch_vbus_gpio_data {
	int			port;
	int			intr;
	struct work_struct	irq_work_fall;
	struct work_struct	irq_work_rise;
};
319 
/**
 * struct pch_udc_dev - Structure holding complete information
 *			of the PCH USB device
 * @gadget:		gadget driver data
 * @driver:		reference to gadget driver bound
 * @pdev:		reference to the PCI device
 * @ep:			array of endpoints
 * @lock:		protects all state
 * @stall:		stall requested
 * @prot_stall:		protocol stall requested
 * @suspended:		driver in suspended state
 * @connected:		gadget driver associated
 * @vbus_session:	required vbus_session state
 * @set_cfg_not_acked:	pending acknowledgement 4 setup
 * @waiting_zlp_ack:	pending acknowledgement 4 ZLP
 * @data_requests:	DMA pool for data requests
 * @stp_requests:	DMA pool for setup requests
 * @dma_addr:		DMA address of the setup-buffer pool
 * @setup_data:		Received setup data
 * @base_addr:		for mapped device memory
 * @cfg_data:		current cfg, intf, and alt in use
 * @vbus_gpio:		GPIO information for detecting VBUS
 */
struct pch_udc_dev {
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	struct pci_dev			*pdev;
	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
	spinlock_t			lock; /* protects all state */
	unsigned
			stall:1,
			prot_stall:1,
			suspended:1,
			connected:1,
			vbus_session:1,
			set_cfg_not_acked:1,
			waiting_zlp_ack:1;
	struct pci_pool		*data_requests;
	struct pci_pool		*stp_requests;
	dma_addr_t			dma_addr;
	struct usb_ctrlrequest		setup_data;
	void __iomem			*base_addr;
	struct pch_udc_cfg_data		cfg_data;
	struct pch_vbus_gpio_data	vbus_gpio;
};
366 #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
367 
368 #define PCH_UDC_PCI_BAR_QUARK_X1000	0
369 #define PCH_UDC_PCI_BAR			1
370 
371 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
372 #define PCI_DEVICE_ID_INTEL_EG20T_UDC		0x8808
373 
374 #define PCI_VENDOR_ID_ROHM		0x10DB
375 #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
376 #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
377 
static const char	ep0_string[] = "ep0in";	/* name of the default control endpoint */
static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
static bool speed_fs;	/* module parameter: force full-speed operation when true */
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
383 
/**
 * struct pch_udc_request - Structure holding a PCH USB device request packet
 * @req:		embedded usb_request (gadget-layer request)
 * @td_data_phys:	DMA address of the first descriptor of the chain
 * @td_data:		first DMA descriptor of the chain
 * @td_data_last:	last DMA descriptor of the chain
 * @queue:		entry in the owning endpoint's queue
 * @dma_going:		DMA in progress for request
 * @dma_mapped:		DMA memory mapped for request
 * @dma_done:		DMA completed for request
 * @chain_len:		number of descriptors in the chain
 * @buf:		bounce buffer used for alignment adjustment
 * @dma:		DMA address of the bounce buffer
 */
struct pch_udc_request {
	struct usb_request		req;
	dma_addr_t			td_data_phys;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_data_dma_desc	*td_data_last;
	struct list_head		queue;
	unsigned			dma_going:1,
					dma_mapped:1,
					dma_done:1;
	unsigned			chain_len;
	void				*buf;
	dma_addr_t			dma;
};
411 
412 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
413 {
414 	return ioread32(dev->base_addr + reg);
415 }
416 
417 static inline void pch_udc_writel(struct pch_udc_dev *dev,
418 				    unsigned long val, unsigned long reg)
419 {
420 	iowrite32(val, dev->base_addr + reg);
421 }
422 
423 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
424 				     unsigned long reg,
425 				     unsigned long bitmask)
426 {
427 	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
428 }
429 
430 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
431 				     unsigned long reg,
432 				     unsigned long bitmask)
433 {
434 	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
435 }
436 
437 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
438 {
439 	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
440 }
441 
442 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
443 				    unsigned long val, unsigned long reg)
444 {
445 	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
446 }
447 
448 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
449 				     unsigned long reg,
450 				     unsigned long bitmask)
451 {
452 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
453 }
454 
455 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
456 				     unsigned long reg,
457 				     unsigned long bitmask)
458 {
459 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
460 }
461 
462 /**
463  * pch_udc_csr_busy() - Wait till idle.
464  * @dev:	Reference to pch_udc_dev structure
465  */
466 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
467 {
468 	unsigned int count = 200;
469 
470 	/* Wait till idle */
471 	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
472 		&& --count)
473 		cpu_relax();
474 	if (!count)
475 		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
476 }
477 
/**
 * pch_udc_write_csr() - Write the command and status registers.
 * @dev:	Reference to pch_udc_dev structure
 * @val:	value to be written to CSR register
 * @ep:		endpoint CSR index; register address is UDC_CSR_ADDR + ep*4
 */
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			       unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_writel(dev, val, reg);
	pch_udc_csr_busy(dev);		/* Wait till idle */
}
493 
/**
 * pch_udc_read_csr() - Read the command and status registers.
 * @dev:	Reference to pch_udc_dev structure
 * @ep:		endpoint CSR index; register address is UDC_CSR_ADDR + ep*4
 *
 * Return codes:	content of CSR register
 */
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	/* First read only triggers the CSR access; its value is discarded */
	pch_udc_readl(dev, reg);	/* Dummy read */
	pch_udc_csr_busy(dev);		/* Wait till idle */
	return pch_udc_readl(dev, reg);
}
510 
/**
 * pch_udc_rmt_wakeup() - Initiate remote wakeup signalling
 * @dev:	Reference to pch_udc_dev structure
 *
 * Pulses the RES (resume) bit in the device control register for ~1 ms.
 * The set / delay / clear order is the required signalling sequence; do
 * not reorder.
 */
static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	mdelay(1);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
521 
522 /**
523  * pch_udc_get_frame() - Get the current frame from device status register
524  * @dev:	Reference to pch_udc_dev structure
525  * Retern	current frame
526  */
527 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
528 {
529 	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
530 	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
531 }
532 
533 /**
534  * pch_udc_clear_selfpowered() - Clear the self power control
535  * @dev:	Reference to pch_udc_regs structure
536  */
537 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
538 {
539 	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
540 }
541 
542 /**
543  * pch_udc_set_selfpowered() - Set the self power control
544  * @dev:	Reference to pch_udc_regs structure
545  */
546 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
547 {
548 	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
549 }
550 
551 /**
552  * pch_udc_set_disconnect() - Set the disconnect status.
553  * @dev:	Reference to pch_udc_regs structure
554  */
555 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
556 {
557 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
558 }
559 
/**
 * pch_udc_clear_disconnect() - Clear the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 *
 * Asserts RES before dropping SD, holds it ~1 ms, then releases RES so
 * the host sees resume signalling as the device reconnects.  The
 * statement order is part of the hardware sequence; do not reorder.
 */
static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
{
	/* Clear the disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
573 
/**
 * pch_udc_reconnect() - Re-initialize the USB device controller and
 *			 clear the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 *
 * Performs a full controller re-init, re-enables the reset/enumeration
 * interrupts, then runs the same RES/SD pulse sequence as
 * pch_udc_clear_disconnect().  Statement order is part of the hardware
 * sequence; do not reorder.
 */
static void pch_udc_init(struct pch_udc_dev *dev);
static void pch_udc_reconnect(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);

	/* enable device interrupts */
	/* pch_udc_enable_interrupts() */
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
			UDC_DEVINT_UR | UDC_DEVINT_ENUM);

	/* Clear the disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
596 
/**
 * pch_udc_vbus_session() - set or clear the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 * @is_active:	Parameter specifying the action
 *		  0:   indicating VBUS power is ending
 *		  !0:  indicating VBUS power is starting
 *
 * On power loss, the bound gadget driver's disconnect callback is invoked
 * under dev->lock before soft-disconnecting the controller.
 */
static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
					  int is_active)
{
	if (is_active) {
		pch_udc_reconnect(dev);
		dev->vbus_session = 1;
	} else {
		if (dev->driver && dev->driver->disconnect) {
			spin_lock(&dev->lock);
			dev->driver->disconnect(&dev->gadget);
			spin_unlock(&dev->lock);
		}
		pch_udc_set_disconnect(dev);
		dev->vbus_session = 0;
	}
}
620 
621 /**
622  * pch_udc_ep_set_stall() - Set the stall of endpoint
623  * @ep:		Reference to structure of type pch_udc_ep_regs
624  */
625 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
626 {
627 	if (ep->in) {
628 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
629 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
630 	} else {
631 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
632 	}
633 }
634 
635 /**
636  * pch_udc_ep_clear_stall() - Clear the stall of endpoint
637  * @ep:		Reference to structure of type pch_udc_ep_regs
638  */
639 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
640 {
641 	/* Clear the stall */
642 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
643 	/* Clear NAK by writing CNAK */
644 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
645 }
646 
647 /**
648  * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
649  * @ep:		Reference to structure of type pch_udc_ep_regs
650  * @type:	Type of endpoint
651  */
652 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
653 					u8 type)
654 {
655 	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
656 				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
657 }
658 
659 /**
660  * pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint
661  * @ep:		Reference to structure of type pch_udc_ep_regs
662  * @buf_size:	The buffer word size
663  */
664 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
665 						 u32 buf_size, u32 ep_in)
666 {
667 	u32 data;
668 	if (ep_in) {
669 		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
670 		data = (data & 0xffff0000) | (buf_size & 0xffff);
671 		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
672 	} else {
673 		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
674 		data = (buf_size << 16) | (data & 0xffff);
675 		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
676 	}
677 }
678 
679 /**
680  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
681  * @ep:		Reference to structure of type pch_udc_ep_regs
682  * @pkt_size:	The packet byte size
683  */
684 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
685 {
686 	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
687 	data = (data & 0xffff0000) | (pkt_size & 0xffff);
688 	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
689 }
690 
691 /**
692  * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
693  * @ep:		Reference to structure of type pch_udc_ep_regs
694  * @addr:	Address of the register
695  */
696 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
697 {
698 	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
699 }
700 
701 /**
702  * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
703  * @ep:		Reference to structure of type pch_udc_ep_regs
704  * @addr:	Address of the register
705  */
706 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
707 {
708 	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
709 }
710 
711 /**
712  * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
713  * @ep:		Reference to structure of type pch_udc_ep_regs
714  */
715 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
716 {
717 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
718 }
719 
720 /**
721  * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
722  * @ep:		Reference to structure of type pch_udc_ep_regs
723  */
724 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
725 {
726 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
727 }
728 
729 /**
730  * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
731  * @ep:		Reference to structure of type pch_udc_ep_regs
732  */
733 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
734 {
735 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
736 }
737 
738 /**
739  * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
740  *			register depending on the direction specified
741  * @dev:	Reference to structure of type pch_udc_regs
742  * @dir:	whether Tx or Rx
743  *		  DMA_DIR_RX: Receive
744  *		  DMA_DIR_TX: Transmit
745  */
746 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
747 {
748 	if (dir == DMA_DIR_RX)
749 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
750 	else if (dir == DMA_DIR_TX)
751 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
752 }
753 
754 /**
755  * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
756  *				 register depending on the direction specified
757  * @dev:	Reference to structure of type pch_udc_regs
758  * @dir:	Whether Tx or Rx
759  *		  DMA_DIR_RX: Receive
760  *		  DMA_DIR_TX: Transmit
761  */
762 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
763 {
764 	if (dir == DMA_DIR_RX)
765 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
766 	else if (dir == DMA_DIR_TX)
767 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
768 }
769 
770 /**
771  * pch_udc_set_csr_done() - Set the device control register
772  *				CSR done field (bit 13)
773  * @dev:	reference to structure of type pch_udc_regs
774  */
775 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
776 {
777 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
778 }
779 
780 /**
781  * pch_udc_disable_interrupts() - Disables the specified interrupts
782  * @dev:	Reference to structure of type pch_udc_regs
783  * @mask:	Mask to disable interrupts
784  */
785 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
786 					    u32 mask)
787 {
788 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
789 }
790 
791 /**
792  * pch_udc_enable_interrupts() - Enable the specified interrupts
793  * @dev:	Reference to structure of type pch_udc_regs
794  * @mask:	Mask to enable interrupts
795  */
796 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
797 					   u32 mask)
798 {
799 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
800 }
801 
802 /**
803  * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
804  * @dev:	Reference to structure of type pch_udc_regs
805  * @mask:	Mask to disable interrupts
806  */
807 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
808 						u32 mask)
809 {
810 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
811 }
812 
813 /**
814  * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
815  * @dev:	Reference to structure of type pch_udc_regs
816  * @mask:	Mask to enable interrupts
817  */
818 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
819 					      u32 mask)
820 {
821 	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
822 }
823 
824 /**
825  * pch_udc_read_device_interrupts() - Read the device interrupts
826  * @dev:	Reference to structure of type pch_udc_regs
827  * Retern	The device interrupts
828  */
829 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
830 {
831 	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
832 }
833 
834 /**
835  * pch_udc_write_device_interrupts() - Write device interrupts
836  * @dev:	Reference to structure of type pch_udc_regs
837  * @val:	The value to be written to interrupt register
838  */
839 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
840 						     u32 val)
841 {
842 	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
843 }
844 
845 /**
846  * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
847  * @dev:	Reference to structure of type pch_udc_regs
848  * Retern	The endpoint interrupt
849  */
850 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
851 {
852 	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
853 }
854 
855 /**
856  * pch_udc_write_ep_interrupts() - Clear endpoint interupts
857  * @dev:	Reference to structure of type pch_udc_regs
858  * @val:	The value to be written to interrupt register
859  */
860 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
861 					     u32 val)
862 {
863 	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
864 }
865 
866 /**
867  * pch_udc_read_device_status() - Read the device status
868  * @dev:	Reference to structure of type pch_udc_regs
869  * Retern	The device status
870  */
871 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
872 {
873 	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
874 }
875 
876 /**
877  * pch_udc_read_ep_control() - Read the endpoint control
878  * @ep:		Reference to structure of type pch_udc_ep_regs
879  * Retern	The endpoint control register value
880  */
881 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
882 {
883 	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
884 }
885 
886 /**
887  * pch_udc_clear_ep_control() - Clear the endpoint control register
888  * @ep:		Reference to structure of type pch_udc_ep_regs
889  * Retern	The endpoint control register value
890  */
891 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
892 {
893 	return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
894 }
895 
896 /**
897  * pch_udc_read_ep_status() - Read the endpoint status
898  * @ep:		Reference to structure of type pch_udc_ep_regs
899  * Retern	The endpoint status
900  */
901 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
902 {
903 	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
904 }
905 
906 /**
907  * pch_udc_clear_ep_status() - Clear the endpoint status
908  * @ep:		Reference to structure of type pch_udc_ep_regs
909  * @stat:	Endpoint status
910  */
911 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
912 					 u32 stat)
913 {
914 	return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
915 }
916 
917 /**
918  * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
919  *				of the endpoint control register
920  * @ep:		Reference to structure of type pch_udc_ep_regs
921  */
922 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
923 {
924 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
925 }
926 
/**
 * pch_udc_ep_clear_nak() - Clear NAK via the CNAK bit (bit 8)
 *				of the endpoint control register
 * @ep:		reference to structure of type pch_udc_ep_regs
 *
 * No-op if the endpoint is not currently NAKing.  For an OUT endpoint,
 * first waits (bounded) for the receive FIFO to drain, then repeatedly
 * writes CNAK until the hardware drops the NAK bit.  Both waits log an
 * error on timeout rather than failing.
 */
static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
{
	unsigned int loopcnt = 0;
	struct pch_udc_dev *dev = ep->dev;

	/* Nothing to do if the endpoint is not NAKing */
	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
		return;
	if (!ep->in) {
		/* OUT ep: wait for the receive FIFO to empty first */
		loopcnt = 10000;
		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
			--loopcnt)
			udelay(5);
		if (!loopcnt)
			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
				__func__);
	}
	/* Write CNAK until the controller clears the NAK flag */
	loopcnt = 10000;
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
		udelay(5);
	}
	if (!loopcnt)
		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
			__func__, ep->num, (ep->in ? "in" : "out"));
}
957 
958 /**
959  * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
960  * @ep:	reference to structure of type pch_udc_ep_regs
961  * @dir:	direction of endpoint
962  *		  0:  endpoint is OUT
963  *		  !0: endpoint is IN
964  */
965 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
966 {
967 	if (dir) {	/* IN ep */
968 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
969 		return;
970 	}
971 }
972 
973 /**
974  * pch_udc_ep_enable() - This api enables endpoint
975  * @regs:	Reference to structure pch_udc_ep_regs
976  * @desc:	endpoint descriptor
977  */
978 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
979 			       struct pch_udc_cfg_data *cfg,
980 			       const struct usb_endpoint_descriptor *desc)
981 {
982 	u32 val = 0;
983 	u32 buff_size = 0;
984 
985 	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
986 	if (ep->in)
987 		buff_size = UDC_EPIN_BUFF_SIZE;
988 	else
989 		buff_size = UDC_EPOUT_BUFF_SIZE;
990 	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
991 	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
992 	pch_udc_ep_set_nak(ep);
993 	pch_udc_ep_fifo_flush(ep, ep->in);
994 	/* Configure the endpoint */
995 	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
996 	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
997 		UDC_CSR_NE_TYPE_SHIFT) |
998 	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
999 	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
1000 	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
1001 	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
1002 
1003 	if (ep->in)
1004 		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1005 	else
1006 		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1007 }
1008 
1009 /**
1010  * pch_udc_ep_disable() - This api disables endpoint
1011  * @regs:	Reference to structure pch_udc_ep_regs
1012  */
1013 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1014 {
1015 	if (ep->in) {
1016 		/* flush the fifo */
1017 		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1018 		/* set NAK */
1019 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1020 		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1021 	} else {
1022 		/* set NAK */
1023 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1024 	}
1025 	/* reset desc pointer */
1026 	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1027 }
1028 
1029 /**
1030  * pch_udc_wait_ep_stall() - Wait EP stall.
1031  * @dev:	Reference to pch_udc_dev structure
1032  */
1033 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1034 {
1035 	unsigned int count = 10000;
1036 
1037 	/* Wait till idle */
1038 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1039 		udelay(5);
1040 	if (!count)
1041 		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1042 }
1043 
1044 /**
1045  * pch_udc_init() - This API initializes usb device controller
1046  * @dev:	Rreference to pch_udc_regs structure
1047  */
1048 static void pch_udc_init(struct pch_udc_dev *dev)
1049 {
1050 	if (NULL == dev) {
1051 		pr_err("%s: Invalid address\n", __func__);
1052 		return;
1053 	}
1054 	/* Soft Reset and Reset PHY */
1055 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1056 	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1057 	mdelay(1);
1058 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1059 	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1060 	mdelay(1);
1061 	/* mask and clear all device interrupts */
1062 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1063 	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1064 
1065 	/* mask and clear all ep interrupts */
1066 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1067 	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1068 
1069 	/* enable dynamic CSR programmingi, self powered and device speed */
1070 	if (speed_fs)
1071 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1072 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1073 	else /* defaul high speed */
1074 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1075 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1076 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1077 			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1078 			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1079 			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1080 			UDC_DEVCTL_THE);
1081 }
1082 
1083 /**
1084  * pch_udc_exit() - This API exit usb device controller
1085  * @dev:	Reference to pch_udc_regs structure
1086  */
1087 static void pch_udc_exit(struct pch_udc_dev *dev)
1088 {
1089 	/* mask all device interrupts */
1090 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1091 	/* mask all ep interrupts */
1092 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1093 	/* put device in disconnected state */
1094 	pch_udc_set_disconnect(dev);
1095 }
1096 
1097 /**
1098  * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1099  * @gadget:	Reference to the gadget driver
1100  *
1101  * Return codes:
1102  *	0:		Success
1103  *	-EINVAL:	If the gadget passed is NULL
1104  */
1105 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1106 {
1107 	struct pch_udc_dev	*dev;
1108 
1109 	if (!gadget)
1110 		return -EINVAL;
1111 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1112 	return pch_udc_get_frame(dev);
1113 }
1114 
1115 /**
1116  * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1117  * @gadget:	Reference to the gadget driver
1118  *
1119  * Return codes:
1120  *	0:		Success
1121  *	-EINVAL:	If the gadget passed is NULL
1122  */
1123 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1124 {
1125 	struct pch_udc_dev	*dev;
1126 	unsigned long		flags;
1127 
1128 	if (!gadget)
1129 		return -EINVAL;
1130 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1131 	spin_lock_irqsave(&dev->lock, flags);
1132 	pch_udc_rmt_wakeup(dev);
1133 	spin_unlock_irqrestore(&dev->lock, flags);
1134 	return 0;
1135 }
1136 
1137 /**
1138  * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1139  *				is self powered or not
1140  * @gadget:	Reference to the gadget driver
1141  * @value:	Specifies self powered or not
1142  *
1143  * Return codes:
1144  *	0:		Success
1145  *	-EINVAL:	If the gadget passed is NULL
1146  */
1147 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1148 {
1149 	struct pch_udc_dev	*dev;
1150 
1151 	if (!gadget)
1152 		return -EINVAL;
1153 	gadget->is_selfpowered = (value != 0);
1154 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1155 	if (value)
1156 		pch_udc_set_selfpowered(dev);
1157 	else
1158 		pch_udc_clear_selfpowered(dev);
1159 	return 0;
1160 }
1161 
1162 /**
1163  * pch_udc_pcd_pullup() - This API is invoked to make the device
1164  *				visible/invisible to the host
1165  * @gadget:	Reference to the gadget driver
1166  * @is_on:	Specifies whether the pull up is made active or inactive
1167  *
1168  * Return codes:
1169  *	0:		Success
1170  *	-EINVAL:	If the gadget passed is NULL
1171  */
1172 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1173 {
1174 	struct pch_udc_dev	*dev;
1175 
1176 	if (!gadget)
1177 		return -EINVAL;
1178 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1179 	if (is_on) {
1180 		pch_udc_reconnect(dev);
1181 	} else {
1182 		if (dev->driver && dev->driver->disconnect) {
1183 			spin_lock(&dev->lock);
1184 			dev->driver->disconnect(&dev->gadget);
1185 			spin_unlock(&dev->lock);
1186 		}
1187 		pch_udc_set_disconnect(dev);
1188 	}
1189 
1190 	return 0;
1191 }
1192 
1193 /**
1194  * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1195  *				transceiver (or GPIO) that
1196  *				detects a VBUS power session starting/ending
1197  * @gadget:	Reference to the gadget driver
1198  * @is_active:	specifies whether the session is starting or ending
1199  *
1200  * Return codes:
1201  *	0:		Success
1202  *	-EINVAL:	If the gadget passed is NULL
1203  */
1204 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1205 {
1206 	struct pch_udc_dev	*dev;
1207 
1208 	if (!gadget)
1209 		return -EINVAL;
1210 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1211 	pch_udc_vbus_session(dev, is_active);
1212 	return 0;
1213 }
1214 
1215 /**
1216  * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1217  *				SET_CONFIGURATION calls to
1218  *				specify how much power the device can consume
1219  * @gadget:	Reference to the gadget driver
1220  * @mA:		specifies the current limit in 2mA unit
1221  *
1222  * Return codes:
1223  *	-EINVAL:	If the gadget passed is NULL
1224  *	-EOPNOTSUPP:
1225  */
1226 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1227 {
1228 	return -EOPNOTSUPP;
1229 }
1230 
/* Forward declarations: definitions follow later in this file. */
static int pch_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int pch_udc_stop(struct usb_gadget *g);

/* Gadget operations vector handed to the UDC core via dev->gadget.ops. */
static const struct usb_gadget_ops pch_udc_ops = {
	.get_frame = pch_udc_pcd_get_frame,
	.wakeup = pch_udc_pcd_wakeup,
	.set_selfpowered = pch_udc_pcd_selfpowered,
	.pullup = pch_udc_pcd_pullup,
	.vbus_session = pch_udc_pcd_vbus_session,
	.vbus_draw = pch_udc_pcd_vbus_draw,
	.udc_start = pch_udc_start,
	.udc_stop = pch_udc_stop,
};
1245 
1246 /**
1247  * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1248  * @dev:	Reference to the driver structure
1249  *
1250  * Return value:
1251  *	1: VBUS is high
1252  *	0: VBUS is low
1253  *     -1: It is not enable to detect VBUS using GPIO
1254  */
1255 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1256 {
1257 	int vbus = 0;
1258 
1259 	if (dev->vbus_gpio.port)
1260 		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1261 	else
1262 		vbus = -1;
1263 
1264 	return vbus;
1265 }
1266 
1267 /**
1268  * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1269  *                             If VBUS is Low, disconnect is processed
1270  * @irq_work:	Structure for WorkQueue
1271  *
1272  */
1273 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1274 {
1275 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1276 		struct pch_vbus_gpio_data, irq_work_fall);
1277 	struct pch_udc_dev *dev =
1278 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1279 	int vbus_saved = -1;
1280 	int vbus;
1281 	int count;
1282 
1283 	if (!dev->vbus_gpio.port)
1284 		return;
1285 
1286 	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1287 		count++) {
1288 		vbus = pch_vbus_gpio_get_value(dev);
1289 
1290 		if ((vbus_saved == vbus) && (vbus == 0)) {
1291 			dev_dbg(&dev->pdev->dev, "VBUS fell");
1292 			if (dev->driver
1293 				&& dev->driver->disconnect) {
1294 				dev->driver->disconnect(
1295 					&dev->gadget);
1296 			}
1297 			if (dev->vbus_gpio.intr)
1298 				pch_udc_init(dev);
1299 			else
1300 				pch_udc_reconnect(dev);
1301 			return;
1302 		}
1303 		vbus_saved = vbus;
1304 		mdelay(PCH_VBUS_INTERVAL);
1305 	}
1306 }
1307 
1308 /**
1309  * pch_vbus_gpio_work_rise() - This API checks VBUS is High.
1310  *                             If VBUS is High, connect is processed
1311  * @irq_work:	Structure for WorkQueue
1312  *
1313  */
1314 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1315 {
1316 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1317 		struct pch_vbus_gpio_data, irq_work_rise);
1318 	struct pch_udc_dev *dev =
1319 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1320 	int vbus;
1321 
1322 	if (!dev->vbus_gpio.port)
1323 		return;
1324 
1325 	mdelay(PCH_VBUS_INTERVAL);
1326 	vbus = pch_vbus_gpio_get_value(dev);
1327 
1328 	if (vbus == 1) {
1329 		dev_dbg(&dev->pdev->dev, "VBUS rose");
1330 		pch_udc_reconnect(dev);
1331 		return;
1332 	}
1333 }
1334 
1335 /**
1336  * pch_vbus_gpio_irq() - IRQ handler for GPIO intrerrupt for changing VBUS
1337  * @irq:	Interrupt request number
1338  * @dev:	Reference to the device structure
1339  *
1340  * Return codes:
1341  *	0: Success
1342  *	-EINVAL: GPIO port is invalid or can't be initialized.
1343  */
1344 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1345 {
1346 	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1347 
1348 	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1349 		return IRQ_NONE;
1350 
1351 	if (pch_vbus_gpio_get_value(dev))
1352 		schedule_work(&dev->vbus_gpio.irq_work_rise);
1353 	else
1354 		schedule_work(&dev->vbus_gpio.irq_work_fall);
1355 
1356 	return IRQ_HANDLED;
1357 }
1358 
1359 /**
1360  * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1361  * @dev:	Reference to the driver structure
1362  * @vbus_gpio	Number of GPIO port to detect gpio
1363  *
1364  * Return codes:
1365  *	0: Success
1366  *	-EINVAL: GPIO port is invalid or can't be initialized.
1367  */
1368 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1369 {
1370 	int err;
1371 	int irq_num = 0;
1372 
1373 	dev->vbus_gpio.port = 0;
1374 	dev->vbus_gpio.intr = 0;
1375 
1376 	if (vbus_gpio_port <= -1)
1377 		return -EINVAL;
1378 
1379 	err = gpio_is_valid(vbus_gpio_port);
1380 	if (!err) {
1381 		pr_err("%s: gpio port %d is invalid\n",
1382 			__func__, vbus_gpio_port);
1383 		return -EINVAL;
1384 	}
1385 
1386 	err = gpio_request(vbus_gpio_port, "pch_vbus");
1387 	if (err) {
1388 		pr_err("%s: can't request gpio port %d, err: %d\n",
1389 			__func__, vbus_gpio_port, err);
1390 		return -EINVAL;
1391 	}
1392 
1393 	dev->vbus_gpio.port = vbus_gpio_port;
1394 	gpio_direction_input(vbus_gpio_port);
1395 	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1396 
1397 	irq_num = gpio_to_irq(vbus_gpio_port);
1398 	if (irq_num > 0) {
1399 		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1400 		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1401 			"vbus_detect", dev);
1402 		if (!err) {
1403 			dev->vbus_gpio.intr = irq_num;
1404 			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1405 				pch_vbus_gpio_work_rise);
1406 		} else {
1407 			pr_err("%s: can't request irq %d, err: %d\n",
1408 				__func__, irq_num, err);
1409 		}
1410 	}
1411 
1412 	return 0;
1413 }
1414 
1415 /**
1416  * pch_vbus_gpio_free() - This API frees resources of GPIO port
1417  * @dev:	Reference to the driver structure
1418  */
1419 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1420 {
1421 	if (dev->vbus_gpio.intr)
1422 		free_irq(dev->vbus_gpio.intr, dev);
1423 
1424 	if (dev->vbus_gpio.port)
1425 		gpio_free(dev->vbus_gpio.port);
1426 }
1427 
1428 /**
1429  * complete_req() - This API is invoked from the driver when processing
1430  *			of a request is complete
1431  * @ep:		Reference to the endpoint structure
1432  * @req:	Reference to the request structure
1433  * @status:	Indicates the success/failure of completion
1434  */
1435 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1436 								 int status)
1437 	__releases(&dev->lock)
1438 	__acquires(&dev->lock)
1439 {
1440 	struct pch_udc_dev	*dev;
1441 	unsigned halted = ep->halted;
1442 
1443 	list_del_init(&req->queue);
1444 
1445 	/* set new status if pending */
1446 	if (req->req.status == -EINPROGRESS)
1447 		req->req.status = status;
1448 	else
1449 		status = req->req.status;
1450 
1451 	dev = ep->dev;
1452 	if (req->dma_mapped) {
1453 		if (req->dma == DMA_ADDR_INVALID) {
1454 			if (ep->in)
1455 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1456 						 req->req.length,
1457 						 DMA_TO_DEVICE);
1458 			else
1459 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1460 						 req->req.length,
1461 						 DMA_FROM_DEVICE);
1462 			req->req.dma = DMA_ADDR_INVALID;
1463 		} else {
1464 			if (ep->in)
1465 				dma_unmap_single(&dev->pdev->dev, req->dma,
1466 						 req->req.length,
1467 						 DMA_TO_DEVICE);
1468 			else {
1469 				dma_unmap_single(&dev->pdev->dev, req->dma,
1470 						 req->req.length,
1471 						 DMA_FROM_DEVICE);
1472 				memcpy(req->req.buf, req->buf, req->req.length);
1473 			}
1474 			kfree(req->buf);
1475 			req->dma = DMA_ADDR_INVALID;
1476 		}
1477 		req->dma_mapped = 0;
1478 	}
1479 	ep->halted = 1;
1480 	spin_unlock(&dev->lock);
1481 	if (!ep->in)
1482 		pch_udc_ep_clear_rrdy(ep);
1483 	usb_gadget_giveback_request(&ep->ep, &req->req);
1484 	spin_lock(&dev->lock);
1485 	ep->halted = halted;
1486 }
1487 
1488 /**
1489  * empty_req_queue() - This API empties the request queue of an endpoint
1490  * @ep:		Reference to the endpoint structure
1491  */
1492 static void empty_req_queue(struct pch_udc_ep *ep)
1493 {
1494 	struct pch_udc_request	*req;
1495 
1496 	ep->halted = 1;
1497 	while (!list_empty(&ep->queue)) {
1498 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1499 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
1500 	}
1501 }
1502 
1503 /**
1504  * pch_udc_free_dma_chain() - This function frees the DMA chain created
1505  *				for the request
1506  * @dev		Reference to the driver structure
1507  * @req		Reference to the request to be freed
1508  *
1509  * Return codes:
1510  *	0: Success
1511  */
1512 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1513 				   struct pch_udc_request *req)
1514 {
1515 	struct pch_udc_data_dma_desc *td = req->td_data;
1516 	unsigned i = req->chain_len;
1517 
1518 	dma_addr_t addr2;
1519 	dma_addr_t addr = (dma_addr_t)td->next;
1520 	td->next = 0x00;
1521 	for (; i > 1; --i) {
1522 		/* do not free first desc., will be done by free for request */
1523 		td = phys_to_virt(addr);
1524 		addr2 = (dma_addr_t)td->next;
1525 		pci_pool_free(dev->data_requests, td, addr);
1526 		addr = addr2;
1527 	}
1528 	req->chain_len = 1;
1529 }
1530 
1531 /**
1532  * pch_udc_create_dma_chain() - This function creates or reinitializes
1533  *				a DMA chain
1534  * @ep:		Reference to the endpoint structure
1535  * @req:	Reference to the request
1536  * @buf_len:	The buffer length
1537  * @gfp_flags:	Flags to be used while mapping the data buffer
1538  *
1539  * Return codes:
1540  *	0:		success,
1541  *	-ENOMEM:	pci_pool_alloc invocation fails
1542  */
1543 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1544 				    struct pch_udc_request *req,
1545 				    unsigned long buf_len,
1546 				    gfp_t gfp_flags)
1547 {
1548 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
1549 	unsigned long bytes = req->req.length, i = 0;
1550 	dma_addr_t dma_addr;
1551 	unsigned len = 1;
1552 
1553 	if (req->chain_len > 1)
1554 		pch_udc_free_dma_chain(ep->dev, req);
1555 
1556 	if (req->dma == DMA_ADDR_INVALID)
1557 		td->dataptr = req->req.dma;
1558 	else
1559 		td->dataptr = req->dma;
1560 
1561 	td->status = PCH_UDC_BS_HST_BSY;
1562 	for (; ; bytes -= buf_len, ++len) {
1563 		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1564 		if (bytes <= buf_len)
1565 			break;
1566 		last = td;
1567 		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1568 				    &dma_addr);
1569 		if (!td)
1570 			goto nomem;
1571 		i += buf_len;
1572 		td->dataptr = req->td_data->dataptr + i;
1573 		last->next = dma_addr;
1574 	}
1575 
1576 	req->td_data_last = td;
1577 	td->status |= PCH_UDC_DMA_LAST;
1578 	td->next = req->td_data_phys;
1579 	req->chain_len = len;
1580 	return 0;
1581 
1582 nomem:
1583 	if (len > 1) {
1584 		req->chain_len = len;
1585 		pch_udc_free_dma_chain(ep->dev, req);
1586 	}
1587 	req->chain_len = 1;
1588 	return -ENOMEM;
1589 }
1590 
1591 /**
1592  * prepare_dma() - This function creates and initializes the DMA chain
1593  *			for the request
1594  * @ep:		Reference to the endpoint structure
1595  * @req:	Reference to the request
1596  * @gfp:	Flag to be used while mapping the data buffer
1597  *
1598  * Return codes:
1599  *	0:		Success
1600  *	Other 0:	linux error number on failure
1601  */
1602 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1603 			  gfp_t gfp)
1604 {
1605 	int	retval;
1606 
1607 	/* Allocate and create a DMA chain */
1608 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1609 	if (retval) {
1610 		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1611 		return retval;
1612 	}
1613 	if (ep->in)
1614 		req->td_data->status = (req->td_data->status &
1615 				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1616 	return 0;
1617 }
1618 
1619 /**
1620  * process_zlp() - This function process zero length packets
1621  *			from the gadget driver
1622  * @ep:		Reference to the endpoint structure
1623  * @req:	Reference to the request
1624  */
1625 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1626 {
1627 	struct pch_udc_dev	*dev = ep->dev;
1628 
1629 	/* IN zlp's are handled by hardware */
1630 	complete_req(ep, req, 0);
1631 
1632 	/* if set_config or set_intf is waiting for ack by zlp
1633 	 * then set CSR_DONE
1634 	 */
1635 	if (dev->set_cfg_not_acked) {
1636 		pch_udc_set_csr_done(dev);
1637 		dev->set_cfg_not_acked = 0;
1638 	}
1639 	/* setup command is ACK'ed now by zlp */
1640 	if (!dev->stall && dev->waiting_zlp_ack) {
1641 		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1642 		dev->waiting_zlp_ack = 0;
1643 	}
1644 }
1645 
1646 /**
1647  * pch_udc_start_rxrequest() - This function starts the receive requirement.
1648  * @ep:		Reference to the endpoint structure
1649  * @req:	Reference to the request structure
1650  */
1651 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1652 					 struct pch_udc_request *req)
1653 {
1654 	struct pch_udc_data_dma_desc *td_data;
1655 
1656 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1657 	td_data = req->td_data;
1658 	/* Set the status bits for all descriptors */
1659 	while (1) {
1660 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1661 				    PCH_UDC_BS_HST_RDY;
1662 		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
1663 			break;
1664 		td_data = phys_to_virt(td_data->next);
1665 	}
1666 	/* Write the descriptor pointer */
1667 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1668 	req->dma_going = 1;
1669 	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1670 	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1671 	pch_udc_ep_clear_nak(ep);
1672 	pch_udc_ep_set_rrdy(ep);
1673 }
1674 
1675 /**
1676  * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1677  *				from gadget driver
1678  * @usbep:	Reference to the USB endpoint structure
1679  * @desc:	Reference to the USB endpoint descriptor structure
1680  *
1681  * Return codes:
1682  *	0:		Success
1683  *	-EINVAL:
1684  *	-ESHUTDOWN:
1685  */
1686 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1687 				    const struct usb_endpoint_descriptor *desc)
1688 {
1689 	struct pch_udc_ep	*ep;
1690 	struct pch_udc_dev	*dev;
1691 	unsigned long		iflags;
1692 
1693 	if (!usbep || (usbep->name == ep0_string) || !desc ||
1694 	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1695 		return -EINVAL;
1696 
1697 	ep = container_of(usbep, struct pch_udc_ep, ep);
1698 	dev = ep->dev;
1699 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1700 		return -ESHUTDOWN;
1701 	spin_lock_irqsave(&dev->lock, iflags);
1702 	ep->ep.desc = desc;
1703 	ep->halted = 0;
1704 	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1705 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1706 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1707 	spin_unlock_irqrestore(&dev->lock, iflags);
1708 	return 0;
1709 }
1710 
1711 /**
1712  * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1713  *				from gadget driver
1714  * @usbep	Reference to the USB endpoint structure
1715  *
1716  * Return codes:
1717  *	0:		Success
1718  *	-EINVAL:
1719  */
1720 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1721 {
1722 	struct pch_udc_ep	*ep;
1723 	unsigned long	iflags;
1724 
1725 	if (!usbep)
1726 		return -EINVAL;
1727 
1728 	ep = container_of(usbep, struct pch_udc_ep, ep);
1729 	if ((usbep->name == ep0_string) || !ep->ep.desc)
1730 		return -EINVAL;
1731 
1732 	spin_lock_irqsave(&ep->dev->lock, iflags);
1733 	empty_req_queue(ep);
1734 	ep->halted = 1;
1735 	pch_udc_ep_disable(ep);
1736 	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1737 	ep->ep.desc = NULL;
1738 	INIT_LIST_HEAD(&ep->queue);
1739 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1740 	return 0;
1741 }
1742 
1743 /**
1744  * pch_udc_alloc_request() - This function allocates request structure.
1745  *				It is called by gadget driver
1746  * @usbep:	Reference to the USB endpoint structure
1747  * @gfp:	Flag to be used while allocating memory
1748  *
1749  * Return codes:
1750  *	NULL:			Failure
1751  *	Allocated address:	Success
1752  */
1753 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1754 						  gfp_t gfp)
1755 {
1756 	struct pch_udc_request		*req;
1757 	struct pch_udc_ep		*ep;
1758 	struct pch_udc_data_dma_desc	*dma_desc;
1759 
1760 	if (!usbep)
1761 		return NULL;
1762 	ep = container_of(usbep, struct pch_udc_ep, ep);
1763 	req = kzalloc(sizeof *req, gfp);
1764 	if (!req)
1765 		return NULL;
1766 	req->req.dma = DMA_ADDR_INVALID;
1767 	req->dma = DMA_ADDR_INVALID;
1768 	INIT_LIST_HEAD(&req->queue);
1769 	if (!ep->dev->dma_addr)
1770 		return &req->req;
1771 	/* ep0 in requests are allocated from data pool here */
1772 	dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1773 				  &req->td_data_phys);
1774 	if (NULL == dma_desc) {
1775 		kfree(req);
1776 		return NULL;
1777 	}
1778 	/* prevent from using desc. - set HOST BUSY */
1779 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
1780 	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1781 	req->td_data = dma_desc;
1782 	req->td_data_last = dma_desc;
1783 	req->chain_len = 1;
1784 	return &req->req;
1785 }
1786 
1787 /**
1788  * pch_udc_free_request() - This function frees request structure.
1789  *				It is called by gadget driver
1790  * @usbep:	Reference to the USB endpoint structure
1791  * @usbreq:	Reference to the USB request
1792  */
1793 static void pch_udc_free_request(struct usb_ep *usbep,
1794 				  struct usb_request *usbreq)
1795 {
1796 	struct pch_udc_ep	*ep;
1797 	struct pch_udc_request	*req;
1798 	struct pch_udc_dev	*dev;
1799 
1800 	if (!usbep || !usbreq)
1801 		return;
1802 	ep = container_of(usbep, struct pch_udc_ep, ep);
1803 	req = container_of(usbreq, struct pch_udc_request, req);
1804 	dev = ep->dev;
1805 	if (!list_empty(&req->queue))
1806 		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1807 			__func__, usbep->name, req);
1808 	if (req->td_data != NULL) {
1809 		if (req->chain_len > 1)
1810 			pch_udc_free_dma_chain(ep->dev, req);
1811 		pci_pool_free(ep->dev->data_requests, req->td_data,
1812 			      req->td_data_phys);
1813 	}
1814 	kfree(req);
1815 }
1816 
1817 /**
1818  * pch_udc_pcd_queue() - This function queues a request packet. It is called
1819  *			by gadget driver
1820  * @usbep:	Reference to the USB endpoint structure
1821  * @usbreq:	Reference to the USB request
1822  * @gfp:	Flag to be used while mapping the data buffer
1823  *
1824  * Return codes:
1825  *	0:			Success
1826  *	linux error number:	Failure
1827  */
1828 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1829 								 gfp_t gfp)
1830 {
1831 	int retval = 0;
1832 	struct pch_udc_ep	*ep;
1833 	struct pch_udc_dev	*dev;
1834 	struct pch_udc_request	*req;
1835 	unsigned long	iflags;
1836 
1837 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1838 		return -EINVAL;
1839 	ep = container_of(usbep, struct pch_udc_ep, ep);
1840 	dev = ep->dev;
1841 	if (!ep->ep.desc && ep->num)
1842 		return -EINVAL;
1843 	req = container_of(usbreq, struct pch_udc_request, req);
1844 	if (!list_empty(&req->queue))
1845 		return -EINVAL;
1846 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1847 		return -ESHUTDOWN;
1848 	spin_lock_irqsave(&dev->lock, iflags);
1849 	/* map the buffer for dma */
1850 	if (usbreq->length &&
1851 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1852 		if (!((unsigned long)(usbreq->buf) & 0x03)) {
1853 			if (ep->in)
1854 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1855 							     usbreq->buf,
1856 							     usbreq->length,
1857 							     DMA_TO_DEVICE);
1858 			else
1859 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1860 							     usbreq->buf,
1861 							     usbreq->length,
1862 							     DMA_FROM_DEVICE);
1863 		} else {
1864 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1865 			if (!req->buf) {
1866 				retval = -ENOMEM;
1867 				goto probe_end;
1868 			}
1869 			if (ep->in) {
1870 				memcpy(req->buf, usbreq->buf, usbreq->length);
1871 				req->dma = dma_map_single(&dev->pdev->dev,
1872 							  req->buf,
1873 							  usbreq->length,
1874 							  DMA_TO_DEVICE);
1875 			} else
1876 				req->dma = dma_map_single(&dev->pdev->dev,
1877 							  req->buf,
1878 							  usbreq->length,
1879 							  DMA_FROM_DEVICE);
1880 		}
1881 		req->dma_mapped = 1;
1882 	}
1883 	if (usbreq->length > 0) {
1884 		retval = prepare_dma(ep, req, GFP_ATOMIC);
1885 		if (retval)
1886 			goto probe_end;
1887 	}
1888 	usbreq->actual = 0;
1889 	usbreq->status = -EINPROGRESS;
1890 	req->dma_done = 0;
1891 	if (list_empty(&ep->queue) && !ep->halted) {
1892 		/* no pending transfer, so start this req */
1893 		if (!usbreq->length) {
1894 			process_zlp(ep, req);
1895 			retval = 0;
1896 			goto probe_end;
1897 		}
1898 		if (!ep->in) {
1899 			pch_udc_start_rxrequest(ep, req);
1900 		} else {
1901 			/*
1902 			* For IN trfr the descriptors will be programmed and
1903 			* P bit will be set when
1904 			* we get an IN token
1905 			*/
1906 			pch_udc_wait_ep_stall(ep);
1907 			pch_udc_ep_clear_nak(ep);
1908 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1909 		}
1910 	}
1911 	/* Now add this request to the ep's pending requests */
1912 	if (req != NULL)
1913 		list_add_tail(&req->queue, &ep->queue);
1914 
1915 probe_end:
1916 	spin_unlock_irqrestore(&dev->lock, iflags);
1917 	return retval;
1918 }
1919 
1920 /**
1921  * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1922  *				It is called by gadget driver
1923  * @usbep:	Reference to the USB endpoint structure
1924  * @usbreq:	Reference to the USB request
1925  *
1926  * Return codes:
1927  *	0:			Success
1928  *	linux error number:	Failure
1929  */
1930 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1931 				struct usb_request *usbreq)
1932 {
1933 	struct pch_udc_ep	*ep;
1934 	struct pch_udc_request	*req;
1935 	unsigned long		flags;
1936 	int ret = -EINVAL;
1937 
1938 	ep = container_of(usbep, struct pch_udc_ep, ep);
1939 	if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1940 		return ret;
1941 	req = container_of(usbreq, struct pch_udc_request, req);
1942 	spin_lock_irqsave(&ep->dev->lock, flags);
1943 	/* make sure it's still queued on this endpoint */
1944 	list_for_each_entry(req, &ep->queue, queue) {
1945 		if (&req->req == usbreq) {
1946 			pch_udc_ep_set_nak(ep);
1947 			if (!list_empty(&req->queue))
1948 				complete_req(ep, req, -ECONNRESET);
1949 			ret = 0;
1950 			break;
1951 		}
1952 	}
1953 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1954 	return ret;
1955 }
1956 
1957 /**
1958  * pch_udc_pcd_set_halt() - This function Sets or clear the endpoint halt
1959  *			    feature
1960  * @usbep:	Reference to the USB endpoint structure
1961  * @halt:	Specifies whether to set or clear the feature
1962  *
1963  * Return codes:
1964  *	0:			Success
1965  *	linux error number:	Failure
1966  */
1967 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1968 {
1969 	struct pch_udc_ep	*ep;
1970 	unsigned long iflags;
1971 	int ret;
1972 
1973 	if (!usbep)
1974 		return -EINVAL;
1975 	ep = container_of(usbep, struct pch_udc_ep, ep);
1976 	if (!ep->ep.desc && !ep->num)
1977 		return -EINVAL;
1978 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1979 		return -ESHUTDOWN;
1980 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1981 	if (list_empty(&ep->queue)) {
1982 		if (halt) {
1983 			if (ep->num == PCH_UDC_EP0)
1984 				ep->dev->stall = 1;
1985 			pch_udc_ep_set_stall(ep);
1986 			pch_udc_enable_ep_interrupts(
1987 				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1988 		} else {
1989 			pch_udc_ep_clear_stall(ep);
1990 		}
1991 		ret = 0;
1992 	} else {
1993 		ret = -EAGAIN;
1994 	}
1995 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1996 	return ret;
1997 }
1998 
1999 /**
2000  * pch_udc_pcd_set_wedge() - This function Sets or clear the endpoint
2001  *				halt feature
2002  * @usbep:	Reference to the USB endpoint structure
2003  * @halt:	Specifies whether to set or clear the feature
2004  *
2005  * Return codes:
2006  *	0:			Success
2007  *	linux error number:	Failure
2008  */
2009 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2010 {
2011 	struct pch_udc_ep	*ep;
2012 	unsigned long iflags;
2013 	int ret;
2014 
2015 	if (!usbep)
2016 		return -EINVAL;
2017 	ep = container_of(usbep, struct pch_udc_ep, ep);
2018 	if (!ep->ep.desc && !ep->num)
2019 		return -EINVAL;
2020 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2021 		return -ESHUTDOWN;
2022 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2023 	if (!list_empty(&ep->queue)) {
2024 		ret = -EAGAIN;
2025 	} else {
2026 		if (ep->num == PCH_UDC_EP0)
2027 			ep->dev->stall = 1;
2028 		pch_udc_ep_set_stall(ep);
2029 		pch_udc_enable_ep_interrupts(ep->dev,
2030 					     PCH_UDC_EPINT(ep->in, ep->num));
2031 		ep->dev->prot_stall = 1;
2032 		ret = 0;
2033 	}
2034 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2035 	return ret;
2036 }
2037 
2038 /**
2039  * pch_udc_pcd_fifo_flush() - This function Flush the FIFO of specified endpoint
2040  * @usbep:	Reference to the USB endpoint structure
2041  */
2042 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2043 {
2044 	struct pch_udc_ep  *ep;
2045 
2046 	if (!usbep)
2047 		return;
2048 
2049 	ep = container_of(usbep, struct pch_udc_ep, ep);
2050 	if (ep->ep.desc || !ep->num)
2051 		pch_udc_ep_fifo_flush(ep, ep->in);
2052 }
2053 
/*
 * Endpoint operations handed to the gadget core through each ep->ep.ops.
 * fifo_status is left NULL: this driver does not report FIFO fill level.
 */
static const struct usb_ep_ops pch_udc_ep_ops = {
	.enable		= pch_udc_pcd_ep_enable,
	.disable	= pch_udc_pcd_ep_disable,
	.alloc_request	= pch_udc_alloc_request,
	.free_request	= pch_udc_free_request,
	.queue		= pch_udc_pcd_queue,
	.dequeue	= pch_udc_pcd_dequeue,
	.set_halt	= pch_udc_pcd_set_halt,
	.set_wedge	= pch_udc_pcd_set_wedge,
	.fifo_status	= NULL,
	.fifo_flush	= pch_udc_pcd_fifo_flush,
};
2066 
2067 /**
2068  * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2069  * @td_stp:	Reference to the SETP buffer structure
2070  */
2071 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2072 {
2073 	static u32	pky_marker;
2074 
2075 	if (!td_stp)
2076 		return;
2077 	td_stp->reserved = ++pky_marker;
2078 	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2079 	td_stp->status = PCH_UDC_BS_HST_RDY;
2080 }
2081 
2082 /**
2083  * pch_udc_start_next_txrequest() - This function starts
2084  *					the next transmission requirement
2085  * @ep:	Reference to the endpoint structure
2086  */
2087 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2088 {
2089 	struct pch_udc_request *req;
2090 	struct pch_udc_data_dma_desc *td_data;
2091 
2092 	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2093 		return;
2094 
2095 	if (list_empty(&ep->queue))
2096 		return;
2097 
2098 	/* next request */
2099 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2100 	if (req->dma_going)
2101 		return;
2102 	if (!req->td_data)
2103 		return;
2104 	pch_udc_wait_ep_stall(ep);
2105 	req->dma_going = 1;
2106 	pch_udc_ep_set_ddptr(ep, 0);
2107 	td_data = req->td_data;
2108 	while (1) {
2109 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2110 				   PCH_UDC_BS_HST_RDY;
2111 		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2112 			break;
2113 		td_data = phys_to_virt(td_data->next);
2114 	}
2115 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2116 	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2117 	pch_udc_ep_set_pd(ep);
2118 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2119 	pch_udc_ep_clear_nak(ep);
2120 }
2121 
2122 /**
2123  * pch_udc_complete_transfer() - This function completes a transfer
2124  * @ep:		Reference to the endpoint structure
2125  */
2126 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2127 {
2128 	struct pch_udc_request *req;
2129 	struct pch_udc_dev *dev = ep->dev;
2130 
2131 	if (list_empty(&ep->queue))
2132 		return;
2133 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2134 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2135 	    PCH_UDC_BS_DMA_DONE)
2136 		return;
2137 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2138 	     PCH_UDC_RTS_SUCC) {
2139 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2140 			"epstatus=0x%08x\n",
2141 		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
2142 		       (int)(ep->epsts));
2143 		return;
2144 	}
2145 
2146 	req->req.actual = req->req.length;
2147 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2148 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2149 	complete_req(ep, req, 0);
2150 	req->dma_going = 0;
2151 	if (!list_empty(&ep->queue)) {
2152 		pch_udc_wait_ep_stall(ep);
2153 		pch_udc_ep_clear_nak(ep);
2154 		pch_udc_enable_ep_interrupts(ep->dev,
2155 					     PCH_UDC_EPINT(ep->in, ep->num));
2156 	} else {
2157 		pch_udc_disable_ep_interrupts(ep->dev,
2158 					      PCH_UDC_EPINT(ep->in, ep->num));
2159 	}
2160 }
2161 
2162 /**
2163  * pch_udc_complete_receiver() - This function completes a receiver
2164  * @ep:		Reference to the endpoint structure
2165  */
2166 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2167 {
2168 	struct pch_udc_request *req;
2169 	struct pch_udc_dev *dev = ep->dev;
2170 	unsigned int count;
2171 	struct pch_udc_data_dma_desc *td;
2172 	dma_addr_t addr;
2173 
2174 	if (list_empty(&ep->queue))
2175 		return;
2176 	/* next request */
2177 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2178 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2179 	pch_udc_ep_set_ddptr(ep, 0);
2180 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2181 	    PCH_UDC_BS_DMA_DONE)
2182 		td = req->td_data_last;
2183 	else
2184 		td = req->td_data;
2185 
2186 	while (1) {
2187 		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2188 			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2189 				"epstatus=0x%08x\n",
2190 				(req->td_data->status & PCH_UDC_RXTX_STS),
2191 				(int)(ep->epsts));
2192 			return;
2193 		}
2194 		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2195 			if (td->status & PCH_UDC_DMA_LAST) {
2196 				count = td->status & PCH_UDC_RXTX_BYTES;
2197 				break;
2198 			}
2199 		if (td == req->td_data_last) {
2200 			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
2201 			return;
2202 		}
2203 		addr = (dma_addr_t)td->next;
2204 		td = phys_to_virt(addr);
2205 	}
2206 	/* on 64k packets the RXBYTES field is zero */
2207 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2208 		count = UDC_DMA_MAXPACKET;
2209 	req->td_data->status |= PCH_UDC_DMA_LAST;
2210 	td->status |= PCH_UDC_BS_HST_BSY;
2211 
2212 	req->dma_going = 0;
2213 	req->req.actual = count;
2214 	complete_req(ep, req, 0);
2215 	/* If there is a new/failed requests try that now */
2216 	if (!list_empty(&ep->queue)) {
2217 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2218 		pch_udc_start_rxrequest(ep, req);
2219 	}
2220 }
2221 
2222 /**
2223  * pch_udc_svc_data_in() - This function process endpoint interrupts
2224  *				for IN endpoints
2225  * @dev:	Reference to the device structure
2226  * @ep_num:	Endpoint that generated the interrupt
2227  */
2228 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2229 {
2230 	u32	epsts;
2231 	struct pch_udc_ep	*ep;
2232 
2233 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2234 	epsts = ep->epsts;
2235 	ep->epsts = 0;
2236 
2237 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
2238 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2239 		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2240 		return;
2241 	if ((epsts & UDC_EPSTS_BNA))
2242 		return;
2243 	if (epsts & UDC_EPSTS_HE)
2244 		return;
2245 	if (epsts & UDC_EPSTS_RSS) {
2246 		pch_udc_ep_set_stall(ep);
2247 		pch_udc_enable_ep_interrupts(ep->dev,
2248 					     PCH_UDC_EPINT(ep->in, ep->num));
2249 	}
2250 	if (epsts & UDC_EPSTS_RCS) {
2251 		if (!dev->prot_stall) {
2252 			pch_udc_ep_clear_stall(ep);
2253 		} else {
2254 			pch_udc_ep_set_stall(ep);
2255 			pch_udc_enable_ep_interrupts(ep->dev,
2256 						PCH_UDC_EPINT(ep->in, ep->num));
2257 		}
2258 	}
2259 	if (epsts & UDC_EPSTS_TDC)
2260 		pch_udc_complete_transfer(ep);
2261 	/* On IN interrupt, provide data if we have any */
2262 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2263 	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2264 		pch_udc_start_next_txrequest(ep);
2265 }
2266 
2267 /**
2268  * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2269  * @dev:	Reference to the device structure
2270  * @ep_num:	Endpoint that generated the interrupt
2271  */
2272 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2273 {
2274 	u32			epsts;
2275 	struct pch_udc_ep		*ep;
2276 	struct pch_udc_request		*req = NULL;
2277 
2278 	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2279 	epsts = ep->epsts;
2280 	ep->epsts = 0;
2281 
2282 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2283 		/* next request */
2284 		req = list_entry(ep->queue.next, struct pch_udc_request,
2285 				 queue);
2286 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2287 		     PCH_UDC_BS_DMA_DONE) {
2288 			if (!req->dma_going)
2289 				pch_udc_start_rxrequest(ep, req);
2290 			return;
2291 		}
2292 	}
2293 	if (epsts & UDC_EPSTS_HE)
2294 		return;
2295 	if (epsts & UDC_EPSTS_RSS) {
2296 		pch_udc_ep_set_stall(ep);
2297 		pch_udc_enable_ep_interrupts(ep->dev,
2298 					     PCH_UDC_EPINT(ep->in, ep->num));
2299 	}
2300 	if (epsts & UDC_EPSTS_RCS) {
2301 		if (!dev->prot_stall) {
2302 			pch_udc_ep_clear_stall(ep);
2303 		} else {
2304 			pch_udc_ep_set_stall(ep);
2305 			pch_udc_enable_ep_interrupts(ep->dev,
2306 						PCH_UDC_EPINT(ep->in, ep->num));
2307 		}
2308 	}
2309 	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2310 	    UDC_EPSTS_OUT_DATA) {
2311 		if (ep->dev->prot_stall == 1) {
2312 			pch_udc_ep_set_stall(ep);
2313 			pch_udc_enable_ep_interrupts(ep->dev,
2314 						PCH_UDC_EPINT(ep->in, ep->num));
2315 		} else {
2316 			pch_udc_complete_receiver(ep);
2317 		}
2318 	}
2319 	if (list_empty(&ep->queue))
2320 		pch_udc_set_dma(dev, DMA_DIR_RX);
2321 }
2322 
2323 /**
2324  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2325  * @dev:	Reference to the device structure
2326  */
2327 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2328 {
2329 	u32	epsts;
2330 	struct pch_udc_ep	*ep;
2331 	struct pch_udc_ep	*ep_out;
2332 
2333 	ep = &dev->ep[UDC_EP0IN_IDX];
2334 	ep_out = &dev->ep[UDC_EP0OUT_IDX];
2335 	epsts = ep->epsts;
2336 	ep->epsts = 0;
2337 
2338 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2339 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2340 		       UDC_EPSTS_XFERDONE)))
2341 		return;
2342 	if ((epsts & UDC_EPSTS_BNA))
2343 		return;
2344 	if (epsts & UDC_EPSTS_HE)
2345 		return;
2346 	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2347 		pch_udc_complete_transfer(ep);
2348 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2349 		ep_out->td_data->status = (ep_out->td_data->status &
2350 					~PCH_UDC_BUFF_STS) |
2351 					PCH_UDC_BS_HST_RDY;
2352 		pch_udc_ep_clear_nak(ep_out);
2353 		pch_udc_set_dma(dev, DMA_DIR_RX);
2354 		pch_udc_ep_set_rrdy(ep_out);
2355 	}
2356 	/* On IN interrupt, provide data if we have any */
2357 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2358 	     !(epsts & UDC_EPSTS_TXEMPTY))
2359 		pch_udc_start_next_txrequest(ep);
2360 }
2361 
2362 /**
2363  * pch_udc_svc_control_out() - Routine that handle Control
2364  *					OUT endpoint interrupts
2365  * @dev:	Reference to the device structure
2366  */
2367 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2368 	__releases(&dev->lock)
2369 	__acquires(&dev->lock)
2370 {
2371 	u32	stat;
2372 	int setup_supported;
2373 	struct pch_udc_ep	*ep;
2374 
2375 	ep = &dev->ep[UDC_EP0OUT_IDX];
2376 	stat = ep->epsts;
2377 	ep->epsts = 0;
2378 
2379 	/* If setup data */
2380 	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2381 	    UDC_EPSTS_OUT_SETUP) {
2382 		dev->stall = 0;
2383 		dev->ep[UDC_EP0IN_IDX].halted = 0;
2384 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
2385 		dev->setup_data = ep->td_stp->request;
2386 		pch_udc_init_setup_buff(ep->td_stp);
2387 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2388 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2389 				      dev->ep[UDC_EP0IN_IDX].in);
2390 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
2391 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2392 		else /* OUT */
2393 			dev->gadget.ep0 = &ep->ep;
2394 		spin_lock(&dev->lock);
2395 		/* If Mass storage Reset */
2396 		if ((dev->setup_data.bRequestType == 0x21) &&
2397 		    (dev->setup_data.bRequest == 0xFF))
2398 			dev->prot_stall = 0;
2399 		/* call gadget with setup data received */
2400 		setup_supported = dev->driver->setup(&dev->gadget,
2401 						     &dev->setup_data);
2402 		spin_unlock(&dev->lock);
2403 
2404 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
2405 			ep->td_data->status = (ep->td_data->status &
2406 						~PCH_UDC_BUFF_STS) |
2407 						PCH_UDC_BS_HST_RDY;
2408 			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2409 		}
2410 		/* ep0 in returns data on IN phase */
2411 		if (setup_supported >= 0 && setup_supported <
2412 					    UDC_EP0IN_MAX_PKT_SIZE) {
2413 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2414 			/* Gadget would have queued a request when
2415 			 * we called the setup */
2416 			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2417 				pch_udc_set_dma(dev, DMA_DIR_RX);
2418 				pch_udc_ep_clear_nak(ep);
2419 			}
2420 		} else if (setup_supported < 0) {
2421 			/* if unsupported request, then stall */
2422 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2423 			pch_udc_enable_ep_interrupts(ep->dev,
2424 						PCH_UDC_EPINT(ep->in, ep->num));
2425 			dev->stall = 0;
2426 			pch_udc_set_dma(dev, DMA_DIR_RX);
2427 		} else {
2428 			dev->waiting_zlp_ack = 1;
2429 		}
2430 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2431 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
2432 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2433 		pch_udc_ep_set_ddptr(ep, 0);
2434 		if (!list_empty(&ep->queue)) {
2435 			ep->epsts = stat;
2436 			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2437 		}
2438 		pch_udc_set_dma(dev, DMA_DIR_RX);
2439 	}
2440 	pch_udc_ep_set_rrdy(ep);
2441 }
2442 
2443 
2444 /**
2445  * pch_udc_postsvc_epinters() - This function enables end point interrupts
2446  *				and clears NAK status
2447  * @dev:	Reference to the device structure
2448  * @ep_num:	End point number
2449  */
2450 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2451 {
2452 	struct pch_udc_ep	*ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2453 	if (list_empty(&ep->queue))
2454 		return;
2455 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2456 	pch_udc_ep_clear_nak(ep);
2457 }
2458 
2459 /**
2460  * pch_udc_read_all_epstatus() - This function read all endpoint status
2461  * @dev:	Reference to the device structure
2462  * @ep_intr:	Status of endpoint interrupt
2463  */
2464 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2465 {
2466 	int i;
2467 	struct pch_udc_ep	*ep;
2468 
2469 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2470 		/* IN */
2471 		if (ep_intr & (0x1 << i)) {
2472 			ep = &dev->ep[UDC_EPIN_IDX(i)];
2473 			ep->epsts = pch_udc_read_ep_status(ep);
2474 			pch_udc_clear_ep_status(ep, ep->epsts);
2475 		}
2476 		/* OUT */
2477 		if (ep_intr & (0x10000 << i)) {
2478 			ep = &dev->ep[UDC_EPOUT_IDX(i)];
2479 			ep->epsts = pch_udc_read_ep_status(ep);
2480 			pch_udc_clear_ep_status(ep, ep->epsts);
2481 		}
2482 	}
2483 }
2484 
2485 /**
2486  * pch_udc_activate_control_ep() - This function enables the control endpoints
2487  *					for traffic after a reset
2488  * @dev:	Reference to the device structure
2489  */
2490 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2491 {
2492 	struct pch_udc_ep	*ep;
2493 	u32 val;
2494 
2495 	/* Setup the IN endpoint */
2496 	ep = &dev->ep[UDC_EP0IN_IDX];
2497 	pch_udc_clear_ep_control(ep);
2498 	pch_udc_ep_fifo_flush(ep, ep->in);
2499 	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2500 	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2501 	/* Initialize the IN EP Descriptor */
2502 	ep->td_data      = NULL;
2503 	ep->td_stp       = NULL;
2504 	ep->td_data_phys = 0;
2505 	ep->td_stp_phys  = 0;
2506 
2507 	/* Setup the OUT endpoint */
2508 	ep = &dev->ep[UDC_EP0OUT_IDX];
2509 	pch_udc_clear_ep_control(ep);
2510 	pch_udc_ep_fifo_flush(ep, ep->in);
2511 	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2512 	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2513 	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2514 	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2515 
2516 	/* Initialize the SETUP buffer */
2517 	pch_udc_init_setup_buff(ep->td_stp);
2518 	/* Write the pointer address of dma descriptor */
2519 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2520 	/* Write the pointer address of Setup descriptor */
2521 	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2522 
2523 	/* Initialize the dma descriptor */
2524 	ep->td_data->status  = PCH_UDC_DMA_LAST;
2525 	ep->td_data->dataptr = dev->dma_addr;
2526 	ep->td_data->next    = ep->td_data_phys;
2527 
2528 	pch_udc_ep_clear_nak(ep);
2529 }
2530 
2531 
2532 /**
2533  * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2534  * @dev:	Reference to driver structure
2535  */
2536 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2537 {
2538 	struct pch_udc_ep	*ep;
2539 	int i;
2540 
2541 	pch_udc_clear_dma(dev, DMA_DIR_TX);
2542 	pch_udc_clear_dma(dev, DMA_DIR_RX);
2543 	/* Mask all endpoint interrupts */
2544 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2545 	/* clear all endpoint interrupts */
2546 	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2547 
2548 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2549 		ep = &dev->ep[i];
2550 		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2551 		pch_udc_clear_ep_control(ep);
2552 		pch_udc_ep_set_ddptr(ep, 0);
2553 		pch_udc_write_csr(ep->dev, 0x00, i);
2554 	}
2555 	dev->stall = 0;
2556 	dev->prot_stall = 0;
2557 	dev->waiting_zlp_ack = 0;
2558 	dev->set_cfg_not_acked = 0;
2559 
2560 	/* disable ep to empty req queue. Skip the control EP's */
2561 	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2562 		ep = &dev->ep[i];
2563 		pch_udc_ep_set_nak(ep);
2564 		pch_udc_ep_fifo_flush(ep, ep->in);
2565 		/* Complete request queue */
2566 		empty_req_queue(ep);
2567 	}
2568 	if (dev->driver) {
2569 		spin_unlock(&dev->lock);
2570 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2571 		spin_lock(&dev->lock);
2572 	}
2573 }
2574 
2575 /**
2576  * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2577  *				done interrupt
2578  * @dev:	Reference to driver structure
2579  */
2580 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2581 {
2582 	u32 dev_stat, dev_speed;
2583 	u32 speed = USB_SPEED_FULL;
2584 
2585 	dev_stat = pch_udc_read_device_status(dev);
2586 	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2587 						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2588 	switch (dev_speed) {
2589 	case UDC_DEVSTS_ENUM_SPEED_HIGH:
2590 		speed = USB_SPEED_HIGH;
2591 		break;
2592 	case  UDC_DEVSTS_ENUM_SPEED_FULL:
2593 		speed = USB_SPEED_FULL;
2594 		break;
2595 	case  UDC_DEVSTS_ENUM_SPEED_LOW:
2596 		speed = USB_SPEED_LOW;
2597 		break;
2598 	default:
2599 		BUG();
2600 	}
2601 	dev->gadget.speed = speed;
2602 	pch_udc_activate_control_ep(dev);
2603 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2604 	pch_udc_set_dma(dev, DMA_DIR_TX);
2605 	pch_udc_set_dma(dev, DMA_DIR_RX);
2606 	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2607 
2608 	/* enable device interrupts */
2609 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2610 					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2611 					UDC_DEVINT_SI | UDC_DEVINT_SC);
2612 }
2613 
2614 /**
2615  * pch_udc_svc_intf_interrupt() - This function handles a set interface
2616  *				  interrupt
2617  * @dev:	Reference to driver structure
2618  */
2619 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2620 {
2621 	u32 reg, dev_stat = 0;
2622 	int i;
2623 
2624 	dev_stat = pch_udc_read_device_status(dev);
2625 	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2626 							 UDC_DEVSTS_INTF_SHIFT;
2627 	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2628 							 UDC_DEVSTS_ALT_SHIFT;
2629 	dev->set_cfg_not_acked = 1;
2630 	/* Construct the usb request for gadget driver and inform it */
2631 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2632 	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2633 	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2634 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2635 	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2636 	/* programm the Endpoint Cfg registers */
2637 	/* Only one end point cfg register */
2638 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2639 	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2640 	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2641 	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2642 	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2643 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2644 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2645 		/* clear stall bits */
2646 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2647 		dev->ep[i].halted = 0;
2648 	}
2649 	dev->stall = 0;
2650 	spin_unlock(&dev->lock);
2651 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2652 	spin_lock(&dev->lock);
2653 }
2654 
2655 /**
2656  * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2657  *				interrupt
2658  * @dev:	Reference to driver structure
2659  */
2660 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2661 {
2662 	int i;
2663 	u32 reg, dev_stat = 0;
2664 
2665 	dev_stat = pch_udc_read_device_status(dev);
2666 	dev->set_cfg_not_acked = 1;
2667 	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2668 				UDC_DEVSTS_CFG_SHIFT;
2669 	/* make usb request for gadget driver */
2670 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2671 	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2672 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2673 	/* program the NE registers */
2674 	/* Only one end point cfg register */
2675 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2676 	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2677 	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2678 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2679 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2680 		/* clear stall bits */
2681 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2682 		dev->ep[i].halted = 0;
2683 	}
2684 	dev->stall = 0;
2685 
2686 	/* call gadget zero with setup data received */
2687 	spin_unlock(&dev->lock);
2688 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2689 	spin_lock(&dev->lock);
2690 }
2691 
2692 /**
2693  * pch_udc_dev_isr() - This function services device interrupts
2694  *			by invoking appropriate routines.
2695  * @dev:	Reference to the device structure
2696  * @dev_intr:	The Device interrupt status.
2697  */
2698 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2699 {
2700 	int vbus;
2701 
2702 	/* USB Reset Interrupt */
2703 	if (dev_intr & UDC_DEVINT_UR) {
2704 		pch_udc_svc_ur_interrupt(dev);
2705 		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2706 	}
2707 	/* Enumeration Done Interrupt */
2708 	if (dev_intr & UDC_DEVINT_ENUM) {
2709 		pch_udc_svc_enum_interrupt(dev);
2710 		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2711 	}
2712 	/* Set Interface Interrupt */
2713 	if (dev_intr & UDC_DEVINT_SI)
2714 		pch_udc_svc_intf_interrupt(dev);
2715 	/* Set Config Interrupt */
2716 	if (dev_intr & UDC_DEVINT_SC)
2717 		pch_udc_svc_cfg_interrupt(dev);
2718 	/* USB Suspend interrupt */
2719 	if (dev_intr & UDC_DEVINT_US) {
2720 		if (dev->driver
2721 			&& dev->driver->suspend) {
2722 			spin_unlock(&dev->lock);
2723 			dev->driver->suspend(&dev->gadget);
2724 			spin_lock(&dev->lock);
2725 		}
2726 
2727 		vbus = pch_vbus_gpio_get_value(dev);
2728 		if ((dev->vbus_session == 0)
2729 			&& (vbus != 1)) {
2730 			if (dev->driver && dev->driver->disconnect) {
2731 				spin_unlock(&dev->lock);
2732 				dev->driver->disconnect(&dev->gadget);
2733 				spin_lock(&dev->lock);
2734 			}
2735 			pch_udc_reconnect(dev);
2736 		} else if ((dev->vbus_session == 0)
2737 			&& (vbus == 1)
2738 			&& !dev->vbus_gpio.intr)
2739 			schedule_work(&dev->vbus_gpio.irq_work_fall);
2740 
2741 		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2742 	}
2743 	/* Clear the SOF interrupt, if enabled */
2744 	if (dev_intr & UDC_DEVINT_SOF)
2745 		dev_dbg(&dev->pdev->dev, "SOF\n");
2746 	/* ES interrupt, IDLE > 3ms on the USB */
2747 	if (dev_intr & UDC_DEVINT_ES)
2748 		dev_dbg(&dev->pdev->dev, "ES\n");
2749 	/* RWKP interrupt */
2750 	if (dev_intr & UDC_DEVINT_RWKP)
2751 		dev_dbg(&dev->pdev->dev, "RWKP\n");
2752 }
2753 
2754 /**
2755  * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2756  * @irq:	Interrupt request number
2757  * @dev:	Reference to the device structure
2758  */
2759 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2760 {
2761 	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2762 	u32 dev_intr, ep_intr;
2763 	int i;
2764 
2765 	dev_intr = pch_udc_read_device_interrupts(dev);
2766 	ep_intr = pch_udc_read_ep_interrupts(dev);
2767 
2768 	/* For a hot plug, this find that the controller is hung up. */
2769 	if (dev_intr == ep_intr)
2770 		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2771 			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2772 			/* The controller is reset */
2773 			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2774 			return IRQ_HANDLED;
2775 		}
2776 	if (dev_intr)
2777 		/* Clear device interrupts */
2778 		pch_udc_write_device_interrupts(dev, dev_intr);
2779 	if (ep_intr)
2780 		/* Clear ep interrupts */
2781 		pch_udc_write_ep_interrupts(dev, ep_intr);
2782 	if (!dev_intr && !ep_intr)
2783 		return IRQ_NONE;
2784 	spin_lock(&dev->lock);
2785 	if (dev_intr)
2786 		pch_udc_dev_isr(dev, dev_intr);
2787 	if (ep_intr) {
2788 		pch_udc_read_all_epstatus(dev, ep_intr);
2789 		/* Process Control In interrupts, if present */
2790 		if (ep_intr & UDC_EPINT_IN_EP0) {
2791 			pch_udc_svc_control_in(dev);
2792 			pch_udc_postsvc_epinters(dev, 0);
2793 		}
2794 		/* Process Control Out interrupts, if present */
2795 		if (ep_intr & UDC_EPINT_OUT_EP0)
2796 			pch_udc_svc_control_out(dev);
2797 		/* Process data in end point interrupts */
2798 		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2799 			if (ep_intr & (1 <<  i)) {
2800 				pch_udc_svc_data_in(dev, i);
2801 				pch_udc_postsvc_epinters(dev, i);
2802 			}
2803 		}
2804 		/* Process data out end point interrupts */
2805 		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2806 						 PCH_UDC_USED_EP_NUM); i++)
2807 			if (ep_intr & (1 <<  i))
2808 				pch_udc_svc_data_out(dev, i -
2809 							 UDC_EPINT_OUT_SHIFT);
2810 	}
2811 	spin_unlock(&dev->lock);
2812 	return IRQ_HANDLED;
2813 }
2814 
2815 /**
2816  * pch_udc_setup_ep0() - This function enables control endpoint for traffic
2817  * @dev:	Reference to the device structure
2818  */
2819 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2820 {
2821 	/* enable ep0 interrupts */
2822 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2823 						UDC_EPINT_OUT_EP0);
2824 	/* enable device interrupts */
2825 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2826 				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2827 				       UDC_DEVINT_SI | UDC_DEVINT_SC);
2828 }
2829 
2830 /**
2831  * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2832  * @dev:	Reference to the driver structure
2833  */
2834 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2835 {
2836 	const char *const ep_string[] = {
2837 		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2838 		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2839 		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2840 		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2841 		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2842 		"ep15in", "ep15out",
2843 	};
2844 	int i;
2845 
2846 	dev->gadget.speed = USB_SPEED_UNKNOWN;
2847 	INIT_LIST_HEAD(&dev->gadget.ep_list);
2848 
2849 	/* Initialize the endpoints structures */
2850 	memset(dev->ep, 0, sizeof dev->ep);
2851 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2852 		struct pch_udc_ep *ep = &dev->ep[i];
2853 		ep->dev = dev;
2854 		ep->halted = 1;
2855 		ep->num = i / 2;
2856 		ep->in = ~i & 1;
2857 		ep->ep.name = ep_string[i];
2858 		ep->ep.ops = &pch_udc_ep_ops;
2859 		if (ep->in) {
2860 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2861 			ep->ep.caps.dir_in = true;
2862 		} else {
2863 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2864 					  UDC_EP_REG_SHIFT;
2865 			ep->ep.caps.dir_out = true;
2866 		}
2867 		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2868 			ep->ep.caps.type_control = true;
2869 		} else {
2870 			ep->ep.caps.type_iso = true;
2871 			ep->ep.caps.type_bulk = true;
2872 			ep->ep.caps.type_int = true;
2873 		}
2874 		/* need to set ep->ep.maxpacket and set Default Configuration?*/
2875 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2876 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2877 		INIT_LIST_HEAD(&ep->queue);
2878 	}
2879 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2880 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2881 
2882 	/* remove ep0 in and out from the list.  They have own pointer */
2883 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2884 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2885 
2886 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2887 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2888 }
2889 
2890 /**
2891  * pch_udc_pcd_init() - This API initializes the driver structure
2892  * @dev:	Reference to the driver structure
2893  *
2894  * Return codes:
2895  *	0: Success
2896  */
2897 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2898 {
2899 	pch_udc_init(dev);
2900 	pch_udc_pcd_reinit(dev);
2901 	pch_vbus_gpio_init(dev, vbus_gpio_port);
2902 	return 0;
2903 }
2904 
2905 /**
2906  * init_dma_pools() - create dma pools during initialization
2907  * @pdev:	reference to struct pci_dev
2908  */
2909 static int init_dma_pools(struct pch_udc_dev *dev)
2910 {
2911 	struct pch_udc_stp_dma_desc	*td_stp;
2912 	struct pch_udc_data_dma_desc	*td_data;
2913 	void				*ep0out_buf;
2914 
2915 	/* DMA setup */
2916 	dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2917 		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2918 	if (!dev->data_requests) {
2919 		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2920 			__func__);
2921 		return -ENOMEM;
2922 	}
2923 
2924 	/* dma desc for setup data */
2925 	dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2926 		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2927 	if (!dev->stp_requests) {
2928 		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2929 			__func__);
2930 		return -ENOMEM;
2931 	}
2932 	/* setup */
2933 	td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2934 				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2935 	if (!td_stp) {
2936 		dev_err(&dev->pdev->dev,
2937 			"%s: can't allocate setup dma descriptor\n", __func__);
2938 		return -ENOMEM;
2939 	}
2940 	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2941 
2942 	/* data: 0 packets !? */
2943 	td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2944 				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2945 	if (!td_data) {
2946 		dev_err(&dev->pdev->dev,
2947 			"%s: can't allocate data dma descriptor\n", __func__);
2948 		return -ENOMEM;
2949 	}
2950 	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2951 	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2952 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2953 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2954 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2955 
2956 	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2957 				  GFP_KERNEL);
2958 	if (!ep0out_buf)
2959 		return -ENOMEM;
2960 	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2961 				       UDC_EP0OUT_BUFF_SIZE * 4,
2962 				       DMA_FROM_DEVICE);
2963 	return 0;
2964 }
2965 
2966 static int pch_udc_start(struct usb_gadget *g,
2967 		struct usb_gadget_driver *driver)
2968 {
2969 	struct pch_udc_dev	*dev = to_pch_udc(g);
2970 
2971 	driver->driver.bus = NULL;
2972 	dev->driver = driver;
2973 
2974 	/* get ready for ep0 traffic */
2975 	pch_udc_setup_ep0(dev);
2976 
2977 	/* clear SD */
2978 	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2979 		pch_udc_clear_disconnect(dev);
2980 
2981 	dev->connected = 1;
2982 	return 0;
2983 }
2984 
2985 static int pch_udc_stop(struct usb_gadget *g)
2986 {
2987 	struct pch_udc_dev	*dev = to_pch_udc(g);
2988 
2989 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2990 
2991 	/* Assures that there are no pending requests with this driver */
2992 	dev->driver = NULL;
2993 	dev->connected = 0;
2994 
2995 	/* set SD */
2996 	pch_udc_set_disconnect(dev);
2997 
2998 	return 0;
2999 }
3000 
/* PCI ->shutdown: quiesce the controller so the host sees a clean detach. */
static void pch_udc_shutdown(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	/* mask every device- and endpoint-level interrupt source */
	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* disable the pullup so the host will think we're gone */
	pch_udc_set_disconnect(dev);
}
3011 
3012 static void pch_udc_remove(struct pci_dev *pdev)
3013 {
3014 	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
3015 
3016 	usb_del_gadget_udc(&dev->gadget);
3017 
3018 	/* gadget driver must not be registered */
3019 	if (dev->driver)
3020 		dev_err(&pdev->dev,
3021 			"%s: gadget driver still bound!!!\n", __func__);
3022 	/* dma pool cleanup */
3023 	if (dev->data_requests)
3024 		pci_pool_destroy(dev->data_requests);
3025 
3026 	if (dev->stp_requests) {
3027 		/* cleanup DMA desc's for ep0in */
3028 		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3029 			pci_pool_free(dev->stp_requests,
3030 				dev->ep[UDC_EP0OUT_IDX].td_stp,
3031 				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3032 		}
3033 		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3034 			pci_pool_free(dev->stp_requests,
3035 				dev->ep[UDC_EP0OUT_IDX].td_data,
3036 				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3037 		}
3038 		pci_pool_destroy(dev->stp_requests);
3039 	}
3040 
3041 	if (dev->dma_addr)
3042 		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3043 				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3044 
3045 	pch_vbus_gpio_free(dev);
3046 
3047 	pch_udc_exit(dev);
3048 }
3049 
3050 #ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: mask all device and endpoint interrupt sources.
 * No other controller state is saved here.
 */
static int pch_udc_suspend(struct device *d)
{
	struct pci_dev *pdev = to_pci_dev(d);
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	return 0;
}
3061 
/*
 * System-sleep resume: intentionally a no-op.
 * NOTE(review): interrupts masked in pch_udc_suspend() are not re-enabled
 * here -- confirm they are restored elsewhere (e.g. on driver rebind).
 */
static int pch_udc_resume(struct device *d)
{
	return 0;
}
3066 
3067 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3068 #define PCH_UDC_PM_OPS		(&pch_udc_pm)
3069 #else
3070 #define PCH_UDC_PM_OPS		NULL
3071 #endif /* CONFIG_PM_SLEEP */
3072 
/**
 * pch_udc_probe() - PCI ->probe: map the device, init hardware and
 *		     register the UDC with the gadget core
 * @pdev:	reference to struct pci_dev
 * @id:		matched entry of pch_udc_pcidev_id
 *
 * Failures after hardware init unwind through pch_udc_remove().
 *
 * Return codes:
 *	0:	Success
 *	<0:	error from PCI setup, IRQ request, DMA pool creation or
 *		UDC registration; -ENODEV if hardware init fails
 */
static int pch_udc_probe(struct pci_dev *pdev,
			  const struct pci_device_id *id)
{
	int			bar;
	int			retval;
	struct pch_udc_dev	*dev;

	/* init */
	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* pci setup (managed: auto-disabled on detach) */
	retval = pcim_enable_device(pdev);
	if (retval)
		return retval;

	pci_set_drvdata(pdev, dev);

	/* Determine BAR based on PCI ID */
	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
		bar = PCH_UDC_PCI_BAR_QUARK_X1000;
	else
		bar = PCH_UDC_PCI_BAR;

	/* PCI resource allocation */
	retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
	if (retval)
		return retval;

	dev->base_addr = pcim_iomap_table(pdev)[bar];

	/* initialize the hardware */
	if (pch_udc_pcd_init(dev))
		return -ENODEV;

	/* MSI is best-effort; legacy INTx is used if it fails */
	pci_enable_msi(pdev);

	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
				  IRQF_SHARED, KBUILD_MODNAME, dev);
	if (retval) {
		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
			pdev->irq);
		goto finished;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* device struct setup */
	spin_lock_init(&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &pch_udc_ops;

	retval = init_dma_pools(dev);
	if (retval)
		goto finished;

	dev->gadget.name = KBUILD_MODNAME;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* Put the device in disconnected state till a driver is bound */
	pch_udc_set_disconnect(dev);
	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
	if (retval)
		goto finished;
	return 0;

finished:
	/*
	 * NOTE(review): pch_udc_remove() calls usb_del_gadget_udc() even
	 * though the gadget was never added on this path -- confirm that
	 * is safe for a never-registered gadget.
	 */
	pch_udc_remove(pdev);
	return retval;
}
3145 
/* PCI IDs handled by this driver; entries also match on the USB device class. */
static const struct pci_device_id pch_udc_pcidev_id[] = {
	{
		/* Intel Quark X1000 UDC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		/* Intel EG20T UDC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		/* LAPIS/ROHM ML7213 IOH UDC */
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		/* LAPIS/ROHM ML7831 IOH UDC */
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{ 0 },	/* terminator */
};

MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3172 
/* PCI driver glue; PM ops compile to NULL without CONFIG_PM_SLEEP. */
static struct pci_driver pch_udc_driver = {
	.name =	KBUILD_MODNAME,
	.id_table =	pch_udc_pcidev_id,
	.probe =	pch_udc_probe,
	.remove =	pch_udc_remove,
	.shutdown =	pch_udc_shutdown,
	.driver = {
		.pm = PCH_UDC_PM_OPS,
	},
};

module_pci_driver(pch_udc_driver);

MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");
3189