xref: /linux/drivers/usb/dwc2/gadget.c (revision 5fd54ace4721fc5ce2bb5aef6318fcf17f421460)
1 // SPDX-License-Identifier: GPL-2.0
2 /**
3  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4  *		http://www.samsung.com
5  *
6  * Copyright 2008 Openmoko, Inc.
7  * Copyright 2008 Simtec Electronics
8  *      Ben Dooks <ben@simtec.co.uk>
9  *      http://armlinux.simtec.co.uk/
10  *
11  * S3C USB2.0 High-speed / OtG driver
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17 
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/spinlock.h>
21 #include <linux/interrupt.h>
22 #include <linux/platform_device.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/mutex.h>
25 #include <linux/seq_file.h>
26 #include <linux/delay.h>
27 #include <linux/io.h>
28 #include <linux/slab.h>
29 #include <linux/of_platform.h>
30 
31 #include <linux/usb/ch9.h>
32 #include <linux/usb/gadget.h>
33 #include <linux/usb/phy.h>
34 
35 #include "core.h"
36 #include "hw.h"
37 
38 /* conversion functions */
39 static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
40 {
41 	return container_of(req, struct dwc2_hsotg_req, req);
42 }
43 
44 static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
45 {
46 	return container_of(ep, struct dwc2_hsotg_ep, ep);
47 }
48 
49 static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
50 {
51 	return container_of(gadget, struct dwc2_hsotg, gadget);
52 }
53 
54 static inline void __orr32(void __iomem *ptr, u32 val)
55 {
56 	dwc2_writel(dwc2_readl(ptr) | val, ptr);
57 }
58 
59 static inline void __bic32(void __iomem *ptr, u32 val)
60 {
61 	dwc2_writel(dwc2_readl(ptr) & ~val, ptr);
62 }
63 
64 static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
65 						u32 ep_index, u32 dir_in)
66 {
67 	if (dir_in)
68 		return hsotg->eps_in[ep_index];
69 	else
70 		return hsotg->eps_out[ep_index];
71 }
72 
73 /* forward declaration of functions */
74 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
75 
76 /**
77  * using_dma - return the DMA status of the driver.
78  * @hsotg: The driver state.
79  *
80  * Return true if we're using DMA.
81  *
82  * Currently, we have the DMA support code worked into everywhere
83  * that needs it, but the AMBA DMA implementation in the hardware can
84  * only DMA from 32bit aligned addresses. This means that gadgets such
85  * as the CDC Ethernet cannot work as they often pass packets which are
86  * not 32bit aligned.
87  *
88  * Unfortunately the choice to use DMA or not is global to the controller
89  * and seems to be only settable when the controller is being put through
90  * a core reset. This means we either need to fix the gadgets to take
91  * account of DMA alignment, or add bounce buffers (yuerk).
92  *
93  * g_using_dma is set depending on dts flag.
94  */
95 static inline bool using_dma(struct dwc2_hsotg *hsotg)
96 {
97 	return hsotg->params.g_dma;
98 }
99 
100 /*
101  * using_desc_dma - return the descriptor DMA status of the driver.
102  * @hsotg: The driver state.
103  *
104  * Return true if we're using descriptor DMA.
105  */
106 static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
107 {
108 	return hsotg->params.g_dma_desc;
109 }
110 
111 /**
112  * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
113  * @hs_ep: The endpoint
114  * @increment: The value to increment by
115  *
116  * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
117  * If an overrun occurs it will wrap the value and set the frame_overrun flag.
118  */
119 static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
120 {
121 	hs_ep->target_frame += hs_ep->interval;
122 	if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
123 		hs_ep->frame_overrun = 1;
124 		hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
125 	} else {
126 		hs_ep->frame_overrun = 0;
127 	}
128 }
129 
130 /**
131  * dwc2_hsotg_en_gsint - enable one or more of the general interrupt
132  * @hsotg: The device state
133  * @ints: A bitmask of the interrupts to enable
134  */
135 static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
136 {
137 	u32 gsintmsk = dwc2_readl(hsotg->regs + GINTMSK);
138 	u32 new_gsintmsk;
139 
140 	new_gsintmsk = gsintmsk | ints;
141 
142 	if (new_gsintmsk != gsintmsk) {
143 		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
144 		dwc2_writel(new_gsintmsk, hsotg->regs + GINTMSK);
145 	}
146 }
147 
148 /**
149  * dwc2_hsotg_disable_gsint - disable one or more of the general interrupt
150  * @hsotg: The device state
151  * @ints: A bitmask of the interrupts to enable
152  */
153 static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
154 {
155 	u32 gsintmsk = dwc2_readl(hsotg->regs + GINTMSK);
156 	u32 new_gsintmsk;
157 
158 	new_gsintmsk = gsintmsk & ~ints;
159 
160 	if (new_gsintmsk != gsintmsk)
161 		dwc2_writel(new_gsintmsk, hsotg->regs + GINTMSK);
162 }
163 
164 /**
165  * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
166  * @hsotg: The device state
167  * @ep: The endpoint index
168  * @dir_in: True if direction is in.
169  * @en: The enable value, true to enable
170  *
171  * Set or clear the mask for an individual endpoint's interrupt
172  * request.
173  */
174 static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
175 				  unsigned int ep, unsigned int dir_in,
176 				 unsigned int en)
177 {
178 	unsigned long flags;
179 	u32 bit = 1 << ep;
180 	u32 daint;
181 
182 	if (!dir_in)
183 		bit <<= 16;
184 
185 	local_irq_save(flags);
186 	daint = dwc2_readl(hsotg->regs + DAINTMSK);
187 	if (en)
188 		daint |= bit;
189 	else
190 		daint &= ~bit;
191 	dwc2_writel(daint, hsotg->regs + DAINTMSK);
192 	local_irq_restore(flags);
193 }
194 
195 /**
196  * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
197  */
198 int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
199 {
200 	if (hsotg->hw_params.en_multiple_tx_fifo)
201 		/* In dedicated FIFO mode we need count of IN EPs */
202 		return (dwc2_readl(hsotg->regs + GHWCFG4)  &
203 			GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
204 	else
205 		/* In shared FIFO mode we need count of Periodic IN EPs */
206 		return hsotg->hw_params.num_dev_perio_in_ep;
207 }
208 
209 /**
210  * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
211  */
212 static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
213 {
214 	int val = 0;
215 	int i;
216 	u32 ep_dirs;
217 
218 	/*
219 	 * Don't need additional space for ep info control registers in
220 	 * slave mode.
221 	 */
222 	if (!using_dma(hsotg)) {
223 		dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
224 		return 0;
225 	}
226 
227 	/*
228 	 * Buffer DMA mode - 1 location per endpoit
229 	 * Descriptor DMA mode - 4 locations per endpoint
230 	 */
231 	ep_dirs = hsotg->hw_params.dev_ep_dirs;
232 
233 	for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
234 		val += ep_dirs & 3 ? 1 : 2;
235 		ep_dirs >>= 2;
236 	}
237 
238 	if (using_desc_dma(hsotg))
239 		val = val * 4;
240 
241 	return val;
242 }
243 
244 /**
245  * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
246  * device mode TX FIFOs
247  */
248 int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
249 {
250 	int ep_info_size;
251 	int addr;
252 	int tx_addr_max;
253 	u32 np_tx_fifo_size;
254 
255 	np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
256 				hsotg->params.g_np_tx_fifo_size);
257 
258 	/* Get Endpoint Info Control block size in DWORDs. */
259 	ep_info_size = dwc2_hsotg_ep_info_size(hsotg);
260 	tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;
261 
262 	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
263 	if (tx_addr_max <= addr)
264 		return 0;
265 
266 	return tx_addr_max - addr;
267 }
268 
269 /**
270  * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
271  * TX FIFOs
272  */
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{
	int total = dwc2_hsotg_tx_fifo_total_depth(hsotg);
	int count = dwc2_hsotg_tx_fifo_count(hsotg);

	/* with no TX FIFOs report the full depth (avoids divide by zero) */
	return count ? total / count : total;
}
287 
288 /**
289  * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
290  * @hsotg: The device instance.
291  */
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
	unsigned int ep;
	unsigned int addr;
	int timeout;
	u32 val;
	u32 *txfsz = hsotg->params.g_tx_fifo_size;

	/* Reset fifo map if not correctly cleared during previous session */
	WARN_ON(hsotg->fifo_map);
	hsotg->fifo_map = 0;

	/* set RX/NPTX FIFO sizes */
	dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ);
	dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) |
		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
		    hsotg->regs + GNPTXFSIZ);

	/*
	 * arrange all the rest of the TX FIFOs, as some versions of this
	 * block have overlapping default addresses. This also ensures
	 * that if the settings have been changed, then they are set to
	 * known values.
	 */

	/* start at the end of the GNPTXFSIZ, rounded up */
	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;

	/*
	 * Configure fifos sizes from provided configuration and assign
	 * them to endpoints dynamically according to maxpacket size value of
	 * given endpoint.
	 */
	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
		if (!txfsz[ep])
			continue;
		/* DPTXFSIZn packs start address (low half) and depth */
		val = addr;
		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
			  "insufficient fifo memory");
		addr += txfsz[ep];

		dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
		/* read back; value is discarded - presumably to post the
		 * write before the next iteration (TODO confirm intent) */
		val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep));
	}

	/* place the Endpoint Info Control block right above the last FIFO */
	dwc2_writel(hsotg->hw_params.total_fifo_size |
		    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
		    hsotg->regs + GDFIFOCFG);
	/*
	 * according to p428 of the design guide, we need to ensure that
	 * all fifos are flushed before continuing
	 */

	/* TXFNUM(0x10) selects "flush all TX FIFOs" */
	dwc2_writel(GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
	       GRSTCTL_RXFFLSH, hsotg->regs + GRSTCTL);

	/* wait until the fifos are both flushed */
	timeout = 100;
	while (1) {
		val = dwc2_readl(hsotg->regs + GRSTCTL);

		/* hardware clears the flush bits when it is done */
		if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
			break;

		if (--timeout == 0) {
			dev_err(hsotg->dev,
				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
				__func__, val);
			break;
		}

		udelay(1);
	}

	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
}
369 
370 /**
371  * @ep: USB endpoint to allocate request for.
372  * @flags: Allocation flags
373  *
374  * Allocate a new USB request structure appropriate for the specified endpoint
375  */
376 static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
377 						       gfp_t flags)
378 {
379 	struct dwc2_hsotg_req *req;
380 
381 	req = kzalloc(sizeof(*req), flags);
382 	if (!req)
383 		return NULL;
384 
385 	INIT_LIST_HEAD(&req->queue);
386 
387 	return &req->req;
388 }
389 
390 /**
391  * is_ep_periodic - return true if the endpoint is in periodic mode.
392  * @hs_ep: The endpoint to query.
393  *
394  * Returns true if the endpoint is in periodic mode, meaning it is being
395  * used for an Interrupt or ISO transfer.
396  */
397 static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
398 {
399 	return hs_ep->periodic;
400 }
401 
402 /**
403  * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
404  * @hsotg: The device state.
405  * @hs_ep: The endpoint for the request
406  * @hs_req: The request being processed.
407  *
408  * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
409  * of a request to ensure the buffer is ready for access by the caller.
410  */
411 static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
412 				 struct dwc2_hsotg_ep *hs_ep,
413 				struct dwc2_hsotg_req *hs_req)
414 {
415 	struct usb_request *req = &hs_req->req;
416 
417 	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
418 }
419 
420 /*
421  * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
422  * for Control endpoint
423  * @hsotg: The device state.
424  *
425  * This function will allocate 4 descriptor chains for EP 0: 2 for
426  * Setup stage, per one for IN and OUT data/status transactions.
427  */
428 static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
429 {
430 	hsotg->setup_desc[0] =
431 		dmam_alloc_coherent(hsotg->dev,
432 				    sizeof(struct dwc2_dma_desc),
433 				    &hsotg->setup_desc_dma[0],
434 				    GFP_KERNEL);
435 	if (!hsotg->setup_desc[0])
436 		goto fail;
437 
438 	hsotg->setup_desc[1] =
439 		dmam_alloc_coherent(hsotg->dev,
440 				    sizeof(struct dwc2_dma_desc),
441 				    &hsotg->setup_desc_dma[1],
442 				    GFP_KERNEL);
443 	if (!hsotg->setup_desc[1])
444 		goto fail;
445 
446 	hsotg->ctrl_in_desc =
447 		dmam_alloc_coherent(hsotg->dev,
448 				    sizeof(struct dwc2_dma_desc),
449 				    &hsotg->ctrl_in_desc_dma,
450 				    GFP_KERNEL);
451 	if (!hsotg->ctrl_in_desc)
452 		goto fail;
453 
454 	hsotg->ctrl_out_desc =
455 		dmam_alloc_coherent(hsotg->dev,
456 				    sizeof(struct dwc2_dma_desc),
457 				    &hsotg->ctrl_out_desc_dma,
458 				    GFP_KERNEL);
459 	if (!hsotg->ctrl_out_desc)
460 		goto fail;
461 
462 	return 0;
463 
464 fail:
465 	return -ENOMEM;
466 }
467 
468 /**
469  * dwc2_hsotg_write_fifo - write packet Data to the TxFIFO
470  * @hsotg: The controller state.
471  * @hs_ep: The endpoint we're going to write for.
472  * @hs_req: The request to write data for.
473  *
474  * This is called when the TxFIFO has some space in it to hold a new
475  * transmission and we have something to give it. The actual setup of
476  * the data size is done elsewhere, so all we have to do is to actually
477  * write the data.
478  *
479  * The return value is zero if there is more space (or nothing was done)
480  * otherwise -ENOSPC is returned if the FIFO space was used up.
481  *
482  * This routine is only needed for PIO
483  */
static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				struct dwc2_hsotg_req *hs_req)
{
	bool periodic = is_ep_periodic(hs_ep);
	u32 gnptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
	int buf_pos = hs_req->req.actual;
	int to_write = hs_ep->size_loaded;
	void *data;
	int can_write;
	int pkt_round;
	int max_transfer;

	/* remaining bytes of the chunk programmed at the last load */
	to_write -= (buf_pos - hs_ep->last_load);

	/* if there's nothing to write, get out early */
	if (to_write == 0)
		return 0;

	if (periodic && !hsotg->dedicated_fifos) {
		u32 epsize = dwc2_readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
		int size_left;
		int size_done;

		/*
		 * work out how much data was loaded so we can calculate
		 * how much data is left in the fifo.
		 */

		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);

		/*
		 * if shared fifo, we cannot write anything until the
		 * previous data has been completely sent.
		 */
		if (hs_ep->fifo_load != 0) {
			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
			return -ENOSPC;
		}

		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
			__func__, size_left,
			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);

		/* how much of the data has moved */
		size_done = hs_ep->size_loaded - size_left;

		/* how much data is left in the fifo */
		can_write = hs_ep->fifo_load - size_done;
		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
			__func__, can_write);

		/* free space is the fifo size minus what is still queued */
		can_write = hs_ep->fifo_size - can_write;
		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
			__func__, can_write);

		if (can_write <= 0) {
			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
			return -ENOSPC;
		}
	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
		can_write = dwc2_readl(hsotg->regs +
				DTXFSTS(hs_ep->fifo_index));

		/* DTXFSTS low 16 bits report free space in 32-bit words */
		can_write &= 0xffff;
		can_write *= 4;
	} else {
		if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
			dev_dbg(hsotg->dev,
				"%s: no queue slots available (0x%08x)\n",
				__func__, gnptxsts);

			dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
			return -ENOSPC;
		}

		can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
		can_write *= 4;	/* fifo size is in 32bit quantities. */
	}

	/* one "transfer" is up to mc (multi-count) max-size packets */
	max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;

	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
		__func__, gnptxsts, can_write, to_write, max_transfer);

	/*
	 * limit to 512 bytes of data, it seems at least on the non-periodic
	 * FIFO, requests of >512 cause the endpoint to get stuck with a
	 * fragment of the end of the transfer in it.
	 */
	if (can_write > 512 && !periodic)
		can_write = 512;

	/*
	 * limit the write to one max-packet size worth of data, but allow
	 * the transfer to return that it did not run out of fifo space
	 * doing it.
	 */
	if (to_write > max_transfer) {
		to_write = max_transfer;

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			dwc2_hsotg_en_gsint(hsotg,
					    periodic ? GINTSTS_PTXFEMP :
					   GINTSTS_NPTXFEMP);
	}

	/* see if we can write data */

	if (to_write > can_write) {
		to_write = can_write;
		pkt_round = to_write % max_transfer;

		/*
		 * Round the write down to an
		 * exact number of packets.
		 *
		 * Note, we do not currently check to see if we can ever
		 * write a full packet or not to the FIFO.
		 */

		if (pkt_round)
			to_write -= pkt_round;

		/*
		 * enable correct FIFO interrupt to alert us when there
		 * is more room left.
		 */

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			dwc2_hsotg_en_gsint(hsotg,
					    periodic ? GINTSTS_PTXFEMP :
					   GINTSTS_NPTXFEMP);
	}

	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
		to_write, hs_req->req.length, can_write, buf_pos);

	if (to_write <= 0)
		return -ENOSPC;

	/* account for the bytes about to be pushed into the FIFO */
	hs_req->req.actual = buf_pos + to_write;
	hs_ep->total_data += to_write;

	if (periodic)
		hs_ep->fifo_load += to_write;

	/* the FIFO is written in 32-bit words, rounded up */
	to_write = DIV_ROUND_UP(to_write, 4);
	data = hs_req->req.buf + buf_pos;

	iowrite32_rep(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);

	return (to_write >= can_write) ? -ENOSPC : 0;
}
640 
641 /**
642  * get_ep_limit - get the maximum data legnth for this endpoint
643  * @hs_ep: The endpoint
644  *
645  * Return the maximum data that can be queued in one go on a given endpoint
646  * so that transfers that are too long can be split.
647  */
648 static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
649 {
650 	int index = hs_ep->index;
651 	unsigned int maxsize;
652 	unsigned int maxpkt;
653 
654 	if (index != 0) {
655 		maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
656 		maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
657 	} else {
658 		maxsize = 64 + 64;
659 		if (hs_ep->dir_in)
660 			maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
661 		else
662 			maxpkt = 2;
663 	}
664 
665 	/* we made the constant loading easier above by using +1 */
666 	maxpkt--;
667 	maxsize--;
668 
669 	/*
670 	 * constrain by packet count if maxpkts*pktsize is greater
671 	 * than the length register size.
672 	 */
673 
674 	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
675 		maxsize = maxpkt * hs_ep->ep.maxpacket;
676 
677 	return maxsize;
678 }
679 
680 /**
681  * dwc2_hsotg_read_frameno - read current frame number
682  * @hsotg: The device instance
683  *
684  * Return the current frame number
685  */
686 static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
687 {
688 	u32 dsts;
689 
690 	dsts = dwc2_readl(hsotg->regs + DSTS);
691 	dsts &= DSTS_SOFFN_MASK;
692 	dsts >>= DSTS_SOFFN_SHIFT;
693 
694 	return dsts;
695 }
696 
697 /**
698  * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
699  * DMA descriptor chain prepared for specific endpoint
700  * @hs_ep: The endpoint
701  *
702  * Return the maximum data that can be queued in one go on a given endpoint
703  * depending on its descriptor chain capacity so that transfers that
704  * are too long can be split.
705  */
706 static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
707 {
708 	int is_isoc = hs_ep->isochronous;
709 	unsigned int maxsize;
710 
711 	if (is_isoc)
712 		maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
713 					   DEV_DMA_ISOC_RX_NBYTES_LIMIT;
714 	else
715 		maxsize = DEV_DMA_NBYTES_LIMIT;
716 
717 	/* Above size of one descriptor was chosen, multiple it */
718 	maxsize *= MAX_DMA_DESC_NUM_GENERIC;
719 
720 	return maxsize;
721 }
722 
723 /*
724  * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
725  * @hs_ep: The endpoint
726  * @mask: RX/TX bytes mask to be defined
727  *
728  * Returns maximum data payload for one descriptor after analyzing endpoint
729  * characteristics.
730  * DMA descriptor transfer bytes limit depends on EP type:
731  * Control out - MPS,
732  * Isochronous - descriptor rx/tx bytes bitfield limit,
733  * Control In/Bulk/Interrupt - multiple of mps. This will allow to not
734  * have concatenations from various descriptors within one packet.
735  *
736  * Selects corresponding mask for RX/TX bytes as well.
737  */
738 static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
739 {
740 	u32 mps = hs_ep->ep.maxpacket;
741 	int dir_in = hs_ep->dir_in;
742 	u32 desc_size = 0;
743 
744 	if (!hs_ep->index && !dir_in) {
745 		desc_size = mps;
746 		*mask = DEV_DMA_NBYTES_MASK;
747 	} else if (hs_ep->isochronous) {
748 		if (dir_in) {
749 			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
750 			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
751 		} else {
752 			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
753 			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
754 		}
755 	} else {
756 		desc_size = DEV_DMA_NBYTES_LIMIT;
757 		*mask = DEV_DMA_NBYTES_MASK;
758 
759 		/* Round down desc_size to be mps multiple */
760 		desc_size -= desc_size % mps;
761 	}
762 
763 	return desc_size;
764 }
765 
766 /*
767  * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
768  * @hs_ep: The endpoint
769  * @dma_buff: DMA address to use
770  * @len: Length of the transfer
771  *
772  * This function will iterate over descriptor chain and fill its entries
773  * with corresponding information based on transfer data.
774  */
775 static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
776 						 dma_addr_t dma_buff,
777 						 unsigned int len)
778 {
779 	struct dwc2_hsotg *hsotg = hs_ep->parent;
780 	int dir_in = hs_ep->dir_in;
781 	struct dwc2_dma_desc *desc = hs_ep->desc_list;
782 	u32 mps = hs_ep->ep.maxpacket;
783 	u32 maxsize = 0;
784 	u32 offset = 0;
785 	u32 mask = 0;
786 	int i;
787 
788 	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
789 
790 	hs_ep->desc_count = (len / maxsize) +
791 				((len % maxsize) ? 1 : 0);
792 	if (len == 0)
793 		hs_ep->desc_count = 1;
794 
795 	for (i = 0; i < hs_ep->desc_count; ++i) {
796 		desc->status = 0;
797 		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
798 				 << DEV_DMA_BUFF_STS_SHIFT);
799 
800 		if (len > maxsize) {
801 			if (!hs_ep->index && !dir_in)
802 				desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
803 
804 			desc->status |= (maxsize <<
805 						DEV_DMA_NBYTES_SHIFT & mask);
806 			desc->buf = dma_buff + offset;
807 
808 			len -= maxsize;
809 			offset += maxsize;
810 		} else {
811 			desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
812 
813 			if (dir_in)
814 				desc->status |= (len % mps) ? DEV_DMA_SHORT :
815 					((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0);
816 			if (len > maxsize)
817 				dev_err(hsotg->dev, "wrong len %d\n", len);
818 
819 			desc->status |=
820 				len << DEV_DMA_NBYTES_SHIFT & mask;
821 			desc->buf = dma_buff + offset;
822 		}
823 
824 		desc->status &= ~DEV_DMA_BUFF_STS_MASK;
825 		desc->status |= (DEV_DMA_BUFF_STS_HREADY
826 				 << DEV_DMA_BUFF_STS_SHIFT);
827 		desc++;
828 	}
829 }
830 
831 /*
832  * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
833  * @hs_ep: The isochronous endpoint.
834  * @dma_buff: usb requests dma buffer.
835  * @len: usb request transfer length.
836  *
837  * Finds out index of first free entry either in the bottom or up half of
838  * descriptor chain depend on which is under SW control and not processed
839  * by HW. Then fills that descriptor with the data of the arrived usb request,
840  * frame info, sets Last and IOC bits increments next_desc. If filled
841  * descriptor is not the first one, removes L bit from the previous descriptor
842  * status.
843  */
static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
				      dma_addr_t dma_buff, unsigned int len)
{
	struct dwc2_dma_desc *desc;
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 index;
	u32 maxsize = 0;
	u32 mask = 0;

	/* a single descriptor must be able to hold the whole request */
	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
	if (len > maxsize) {
		dev_err(hsotg->dev, "wrong len %d\n", len);
		return -EINVAL;
	}

	/*
	 * If SW has already filled half of chain, then return and wait for
	 * the other chain to be processed by HW.
	 */
	if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2)
		return -EBUSY;

	/* Increment frame number by interval for IN */
	if (hs_ep->dir_in)
		dwc2_gadget_incr_frame_num(hs_ep);

	/* chain 0 uses the bottom half of the list, chain 1 the top half */
	index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num +
		 hs_ep->next_desc;

	/* Sanity check of calculated index */
	if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) ||
	    (!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) {
		dev_err(hsotg->dev, "wrong index %d for iso chain\n", index);
		return -EINVAL;
	}

	desc = &hs_ep->desc_list[index];

	/* Clear L bit of previous desc if more than one entries in the chain */
	if (hs_ep->next_desc)
		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;

	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);

	/* mark host-busy while the entry is being filled */
	desc->status = 0;
	desc->status |= (DEV_DMA_BUFF_STS_HBUSY	<< DEV_DMA_BUFF_STS_SHIFT);

	/* new tail of the chain: Last + interrupt-on-complete */
	desc->buf = dma_buff;
	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));

	/* IN also carries multi-count PID, short-packet flag and frame num */
	if (hs_ep->dir_in) {
		desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
				 DEV_DMA_ISOC_PID_MASK) |
				((len % hs_ep->ep.maxpacket) ?
				 DEV_DMA_SHORT : 0) |
				((hs_ep->target_frame <<
				  DEV_DMA_ISOC_FRNUM_SHIFT) &
				 DEV_DMA_ISOC_FRNUM_MASK);
	}

	/* hand the entry over to the hardware */
	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);

	/* Update index of last configured entry in the chain */
	hs_ep->next_desc++;

	return 0;
}
914 
915 /*
916  * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
917  * @hs_ep: The isochronous endpoint.
918  *
919  * Prepare first descriptor chain for isochronous endpoints. Afterwards
920  * write DMA address to HW and enable the endpoint.
921  *
922  * Switch between descriptor chains via isoc_chain_num to give SW opportunity
923  * to prepare second descriptor chain while first one is being processed by HW.
924  */
static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	struct dwc2_hsotg_req *hs_req, *treq;
	int index = hs_ep->index;
	int ret;
	u32 dma_reg;
	u32 depctl;
	u32 ctrl;

	/* nothing to start if no requests have been queued yet */
	if (list_empty(&hs_ep->queue)) {
		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
		return;
	}

	/* fill descriptors from queued requests until the chain is full */
	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
		ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
						 hs_req->req.length);
		if (ret) {
			dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
			break;
		}
	}

	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);

	/* write descriptor chain address to control register */
	dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);

	/* enable the endpoint and clear NAK to start processing the chain */
	ctrl = dwc2_readl(hsotg->regs + depctl);
	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
	dwc2_writel(ctrl, hsotg->regs + depctl);

	/* Switch ISOC descriptor chain number being processed by SW*/
	hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
	hs_ep->next_desc = 0;
}
963 
964 /**
965  * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
966  * @hsotg: The controller state.
967  * @hs_ep: The endpoint to process a request for
968  * @hs_req: The request to start.
969  * @continuing: True if we are doing more for the current request.
970  *
971  * Start the given request running by setting the endpoint registers
972  * appropriately, and writing any data to the FIFOs.
973  */
/**
 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
 * @hsotg: The controller state.
 * @hs_ep: The endpoint to process a request for.
 * @hs_req: The request to start.
 * @continuing: True if we are doing more for the current request.
 *
 * Start the given request running by programming the endpoint's size and
 * control registers (or, in descriptor-DMA mode, its descriptor chain),
 * then enabling the endpoint. Expects the controller lock to be held.
 */
static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				struct dwc2_hsotg_req *hs_req,
				bool continuing)
{
	struct usb_request *ureq = &hs_req->req;
	int index = hs_ep->index;
	int dir_in = hs_ep->dir_in;
	u32 epctrl_reg;
	u32 epsize_reg;
	u32 epsize;
	u32 ctrl;
	unsigned int length;
	unsigned int packets;
	unsigned int maxreq;
	unsigned int dma_reg;

	/*
	 * EP0 may be restarted freely; for other endpoints make sure we
	 * are not trampling a request that is already in flight.
	 */
	if (index != 0) {
		if (hs_ep->req && !continuing) {
			dev_err(hsotg->dev, "%s: active request\n", __func__);
			WARN_ON(1);
			return;
		} else if (hs_ep->req != hs_req && continuing) {
			dev_err(hsotg->dev,
				"%s: continue different req\n", __func__);
			WARN_ON(1);
			return;
		}
	}

	dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
	epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
		__func__, dwc2_readl(hsotg->regs + epctrl_reg), index,
		hs_ep->dir_in ? "in" : "out");

	/* If endpoint is stalled, we will restart request later */
	ctrl = dwc2_readl(hsotg->regs + epctrl_reg);

	if (index && ctrl & DXEPCTL_STALL) {
		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
		return;
	}

	/* bytes remaining in this request */
	length = ureq->length - ureq->actual;
	dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
		ureq->length, ureq->actual);

	/* per-transfer hardware limit differs between buffer DMA and DDMA */
	if (!using_desc_dma(hsotg))
		maxreq = get_ep_limit(hs_ep);
	else
		maxreq = dwc2_gadget_get_chain_limit(hs_ep);

	if (length > maxreq) {
		int round = maxreq % hs_ep->ep.maxpacket;

		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
			__func__, length, maxreq, round);

		/* round down to multiple of packets */
		if (round)
			maxreq -= round;

		length = maxreq;
	}

	if (length)
		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
	else
		packets = 1;	/* send one packet if length is zero. */

	if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
		dev_err(hsotg->dev, "req length > maxpacket*mc\n");
		return;
	}

	/* DxEPTSIZ.MC is only programmed >1 for isochronous IN endpoints */
	if (dir_in && index != 0)
		if (hs_ep->isochronous)
			epsize = DXEPTSIZ_MC(packets);
		else
			epsize = DXEPTSIZ_MC(1);
	else
		epsize = 0;

	/*
	 * zero length packet should be programmed on its own and should not
	 * be counted in DIEPTSIZ.PktCnt with other packets.
	 */
	if (dir_in && ureq->zero && !continuing) {
		/* Test if zlp is actually required. */
		if ((ureq->length >= hs_ep->ep.maxpacket) &&
		    !(ureq->length % hs_ep->ep.maxpacket))
			hs_ep->send_zlp = 1;
	}

	epsize |= DXEPTSIZ_PKTCNT(packets);
	epsize |= DXEPTSIZ_XFERSIZE(length);

	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
		__func__, packets, length, ureq->length, epsize, epsize_reg);

	/* store the request as the current one we're doing */
	hs_ep->req = hs_req;

	if (using_desc_dma(hsotg)) {
		u32 offset = 0;
		u32 mps = hs_ep->ep.maxpacket;

		/* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
		if (!dir_in) {
			if (!index)
				length = mps;
			else if (length % mps)
				length += (mps - (length % mps));
		}

		/*
		 * If more data to send, adjust DMA for EP0 out data stage.
		 * ureq->dma stays unchanged, hence increment it by already
		 * passed data count before starting new transaction.
		 */
		if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
		    continuing)
			offset = ureq->actual;

		/* Fill DDMA chain entries */
		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
						     length);

		/* write descriptor chain address to control register */
		dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);

		dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
			__func__, (u32)hs_ep->desc_list_dma, dma_reg);
	} else {
		/* write size / packets */
		dwc2_writel(epsize, hsotg->regs + epsize_reg);

		if (using_dma(hsotg) && !continuing && (length != 0)) {
			/*
			 * write DMA address to control register, buffer
			 * already synced by dwc2_hsotg_ep_queue().
			 */

			dwc2_writel(ureq->dma, hsotg->regs + dma_reg);

			dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
				__func__, &ureq->dma, dma_reg);
		}
	}

	/* per-frame isoc: pick even/odd start parity from the target frame */
	if (hs_ep->isochronous && hs_ep->interval == 1) {
		hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
		dwc2_gadget_incr_frame_num(hs_ep);

		if (hs_ep->target_frame & 0x1)
			ctrl |= DXEPCTL_SETODDFR;
		else
			ctrl |= DXEPCTL_SETEVENFR;
	}

	ctrl |= DXEPCTL_EPENA;	/* ensure ep enabled */

	dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);

	/* For Setup request do not clear NAK */
	if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
		ctrl |= DXEPCTL_CNAK;	/* clear NAK set by core */

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	dwc2_writel(ctrl, hsotg->regs + epctrl_reg);

	/*
	 * set these, it seems that DMA support increments past the end
	 * of the packet buffer so we need to calculate the length from
	 * this information.
	 */
	hs_ep->size_loaded = length;
	hs_ep->last_load = ureq->actual;

	if (dir_in && !using_dma(hsotg)) {
		/* set these anyway, we may need them for non-periodic in */
		hs_ep->fifo_load = 0;

		dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	/*
	 * Note, trying to clear the NAK here causes problems with transmit
	 * on the S3C6400 ending up with the TXFIFO becoming full.
	 */

	/* check ep is enabled */
	if (!(dwc2_readl(hsotg->regs + epctrl_reg) & DXEPCTL_EPENA))
		dev_dbg(hsotg->dev,
			"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
			 index, dwc2_readl(hsotg->regs + epctrl_reg));

	dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
		__func__, dwc2_readl(hsotg->regs + epctrl_reg));

	/* enable ep interrupts */
	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
}
1180 
1181 /**
1182  * dwc2_hsotg_map_dma - map the DMA memory being used for the request
1183  * @hsotg: The device state.
1184  * @hs_ep: The endpoint the request is on.
1185  * @req: The request being processed.
1186  *
1187  * We've been asked to queue a request, so ensure that the memory buffer
1188  * is correctly setup for DMA. If we've been passed an extant DMA address
1189  * then ensure the buffer has been synced to memory. If our buffer has no
1190  * DMA memory, then we map the memory and mark our request to allow us to
1191  * cleanup on completion.
1192  */
1193 static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
1194 			      struct dwc2_hsotg_ep *hs_ep,
1195 			     struct usb_request *req)
1196 {
1197 	int ret;
1198 
1199 	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
1200 	if (ret)
1201 		goto dma_error;
1202 
1203 	return 0;
1204 
1205 dma_error:
1206 	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
1207 		__func__, req->buf, req->length);
1208 
1209 	return -EIO;
1210 }
1211 
1212 static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
1213 						 struct dwc2_hsotg_ep *hs_ep,
1214 						 struct dwc2_hsotg_req *hs_req)
1215 {
1216 	void *req_buf = hs_req->req.buf;
1217 
1218 	/* If dma is not being used or buffer is aligned */
1219 	if (!using_dma(hsotg) || !((long)req_buf & 3))
1220 		return 0;
1221 
1222 	WARN_ON(hs_req->saved_req_buf);
1223 
1224 	dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
1225 		hs_ep->ep.name, req_buf, hs_req->req.length);
1226 
1227 	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
1228 	if (!hs_req->req.buf) {
1229 		hs_req->req.buf = req_buf;
1230 		dev_err(hsotg->dev,
1231 			"%s: unable to allocate memory for bounce buffer\n",
1232 			__func__);
1233 		return -ENOMEM;
1234 	}
1235 
1236 	/* Save actual buffer */
1237 	hs_req->saved_req_buf = req_buf;
1238 
1239 	if (hs_ep->dir_in)
1240 		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
1241 	return 0;
1242 }
1243 
1244 static void
1245 dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
1246 					 struct dwc2_hsotg_ep *hs_ep,
1247 					 struct dwc2_hsotg_req *hs_req)
1248 {
1249 	/* If dma is not being used or buffer was aligned */
1250 	if (!using_dma(hsotg) || !hs_req->saved_req_buf)
1251 		return;
1252 
1253 	dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
1254 		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
1255 
1256 	/* Copy data from bounce buffer on successful out transfer */
1257 	if (!hs_ep->dir_in && !hs_req->req.status)
1258 		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
1259 		       hs_req->req.actual);
1260 
1261 	/* Free bounce buffer */
1262 	kfree(hs_req->req.buf);
1263 
1264 	hs_req->req.buf = hs_req->saved_req_buf;
1265 	hs_req->saved_req_buf = NULL;
1266 }
1267 
1268 /**
1269  * dwc2_gadget_target_frame_elapsed - Checks target frame
1270  * @hs_ep: The driver endpoint to check
1271  *
1272  * Returns 1 if targeted frame elapsed. If returned 1 then we need to drop
1273  * corresponding transfer.
1274  */
1275 static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
1276 {
1277 	struct dwc2_hsotg *hsotg = hs_ep->parent;
1278 	u32 target_frame = hs_ep->target_frame;
1279 	u32 current_frame = dwc2_hsotg_read_frameno(hsotg);
1280 	bool frame_overrun = hs_ep->frame_overrun;
1281 
1282 	if (!frame_overrun && current_frame >= target_frame)
1283 		return true;
1284 
1285 	if (frame_overrun && current_frame >= target_frame &&
1286 	    ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
1287 		return true;
1288 
1289 	return false;
1290 }
1291 
/*
 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
 * @hsotg: The driver state
 * @hs_ep: The endpoint the descriptor chain is for
 *
 * Called to update EP0 structure's pointers depending on the stage of
 * the control transfer.
 */
1300 static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
1301 					  struct dwc2_hsotg_ep *hs_ep)
1302 {
1303 	switch (hsotg->ep0_state) {
1304 	case DWC2_EP0_SETUP:
1305 	case DWC2_EP0_STATUS_OUT:
1306 		hs_ep->desc_list = hsotg->setup_desc[0];
1307 		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
1308 		break;
1309 	case DWC2_EP0_DATA_IN:
1310 	case DWC2_EP0_STATUS_IN:
1311 		hs_ep->desc_list = hsotg->ctrl_in_desc;
1312 		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
1313 		break;
1314 	case DWC2_EP0_DATA_OUT:
1315 		hs_ep->desc_list = hsotg->ctrl_out_desc;
1316 		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
1317 		break;
1318 	default:
1319 		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
1320 			hsotg->ep0_state);
1321 		return -EINVAL;
1322 	}
1323 
1324 	return 0;
1325 }
1326 
/**
 * dwc2_hsotg_ep_queue - queue a request on an endpoint
 * @ep: The endpoint to queue on.
 * @req: The request to queue.
 * @gfp_flags: Allocation flags.
 *
 * Prepare the request (bounce buffer for unaligned DMA, DMA mapping,
 * EP0 descriptor chain selection) and add it to the endpoint queue,
 * starting it immediately if the queue was empty. Expects the
 * controller lock to be held (see dwc2_hsotg_ep_queue_lock()).
 */
static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			       gfp_t gfp_flags)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	bool first;
	int ret;

	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
		ep->name, req, req->length, req->buf, req->no_interrupt,
		req->zero, req->short_not_ok);

	/* Prevent new request submission when controller is suspended */
	if (hs->lx_state == DWC2_L2) {
		dev_dbg(hs->dev, "%s: don't submit request while suspended\n",
			__func__);
		return -EAGAIN;
	}

	/* initialise status of the request */
	INIT_LIST_HEAD(&hs_req->queue);
	req->actual = 0;
	req->status = -EINPROGRESS;

	/* swap in a bounce buffer if the caller's buffer is unaligned */
	ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
	if (ret)
		return ret;

	/* if we're using DMA, sync the buffers as necessary */
	if (using_dma(hs)) {
		ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
		if (ret)
			return ret;
	}
	/* If using descriptor DMA configure EP0 descriptor chain pointers */
	if (using_desc_dma(hs) && !hs_ep->index) {
		ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
		if (ret)
			return ret;
	}

	first = list_empty(&hs_ep->queue);
	list_add_tail(&hs_req->queue, &hs_ep->queue);

	/*
	 * Handle DDMA isochronous transfers separately - just add new entry
	 * to the half of descriptor chain that is not processed by HW.
	 * Transfer will be started once SW gets either one of NAK or
	 * OutTknEpDis interrupts.
	 */
	if (using_desc_dma(hs) && hs_ep->isochronous &&
	    hs_ep->target_frame != TARGET_FRAME_INITIAL) {
		ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
						 hs_req->req.length);
		if (ret)
			dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__);

		return 0;
	}

	if (first) {
		if (!hs_ep->isochronous) {
			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
			return 0;
		}

		/* advance the isoc target frame past frames already gone */
		while (dwc2_gadget_target_frame_elapsed(hs_ep))
			dwc2_gadget_incr_frame_num(hs_ep);

		if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
	}
	return 0;
}
1402 
1403 static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
1404 				    gfp_t gfp_flags)
1405 {
1406 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1407 	struct dwc2_hsotg *hs = hs_ep->parent;
1408 	unsigned long flags = 0;
1409 	int ret = 0;
1410 
1411 	spin_lock_irqsave(&hs->lock, flags);
1412 	ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
1413 	spin_unlock_irqrestore(&hs->lock, flags);
1414 
1415 	return ret;
1416 }
1417 
static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
				       struct usb_request *req)
{
	/* the usb_request is embedded in our wrapper; free the wrapper */
	kfree(our_req(req));
}
1425 
1426 /**
1427  * dwc2_hsotg_complete_oursetup - setup completion callback
1428  * @ep: The endpoint the request was on.
1429  * @req: The request completed.
1430  *
1431  * Called on completion of any requests the driver itself
1432  * submitted that need cleaning up.
1433  */
1434 static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
1435 					 struct usb_request *req)
1436 {
1437 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1438 	struct dwc2_hsotg *hsotg = hs_ep->parent;
1439 
1440 	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
1441 
1442 	dwc2_hsotg_ep_free_request(ep, req);
1443 }
1444 
1445 /**
1446  * ep_from_windex - convert control wIndex value to endpoint
1447  * @hsotg: The driver state.
1448  * @windex: The control request wIndex field (in host order).
1449  *
1450  * Convert the given wIndex into a pointer to an driver endpoint
1451  * structure, or return NULL if it is not a valid endpoint.
1452  */
1453 static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
1454 					    u32 windex)
1455 {
1456 	struct dwc2_hsotg_ep *ep;
1457 	int dir = (windex & USB_DIR_IN) ? 1 : 0;
1458 	int idx = windex & 0x7F;
1459 
1460 	if (windex >= 0x100)
1461 		return NULL;
1462 
1463 	if (idx > hsotg->num_of_eps)
1464 		return NULL;
1465 
1466 	ep = index_to_ep(hsotg, idx, dir);
1467 
1468 	if (idx && ep->dir_in != dir)
1469 		return NULL;
1470 
1471 	return ep;
1472 }
1473 
1474 /**
1475  * dwc2_hsotg_set_test_mode - Enable usb Test Modes
1476  * @hsotg: The driver state.
1477  * @testmode: requested usb test mode
1478  * Enable usb Test Mode requested by the Host.
1479  */
1480 int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
1481 {
1482 	int dctl = dwc2_readl(hsotg->regs + DCTL);
1483 
1484 	dctl &= ~DCTL_TSTCTL_MASK;
1485 	switch (testmode) {
1486 	case TEST_J:
1487 	case TEST_K:
1488 	case TEST_SE0_NAK:
1489 	case TEST_PACKET:
1490 	case TEST_FORCE_EN:
1491 		dctl |= testmode << DCTL_TSTCTL_SHIFT;
1492 		break;
1493 	default:
1494 		return -EINVAL;
1495 	}
1496 	dwc2_writel(dctl, hsotg->regs + DCTL);
1497 	return 0;
1498 }
1499 
1500 /**
1501  * dwc2_hsotg_send_reply - send reply to control request
1502  * @hsotg: The device state
1503  * @ep: Endpoint 0
1504  * @buff: Buffer for request
1505  * @length: Length of reply.
1506  *
1507  * Create a request and queue it on the given endpoint. This is useful as
1508  * an internal method of sending replies to certain control requests, etc.
1509  */
1510 static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
1511 				 struct dwc2_hsotg_ep *ep,
1512 				void *buff,
1513 				int length)
1514 {
1515 	struct usb_request *req;
1516 	int ret;
1517 
1518 	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
1519 
1520 	req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
1521 	hsotg->ep0_reply = req;
1522 	if (!req) {
1523 		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
1524 		return -ENOMEM;
1525 	}
1526 
1527 	req->buf = hsotg->ep0_buff;
1528 	req->length = length;
1529 	/*
1530 	 * zero flag is for sending zlp in DATA IN stage. It has no impact on
1531 	 * STATUS stage.
1532 	 */
1533 	req->zero = 0;
1534 	req->complete = dwc2_hsotg_complete_oursetup;
1535 
1536 	if (length)
1537 		memcpy(req->buf, buff, length);
1538 
1539 	ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
1540 	if (ret) {
1541 		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
1542 		return ret;
1543 	}
1544 
1545 	return 0;
1546 }
1547 
1548 /**
1549  * dwc2_hsotg_process_req_status - process request GET_STATUS
1550  * @hsotg: The device state
1551  * @ctrl: USB control request
1552  */
1553 static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
1554 					 struct usb_ctrlrequest *ctrl)
1555 {
1556 	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1557 	struct dwc2_hsotg_ep *ep;
1558 	__le16 reply;
1559 	int ret;
1560 
1561 	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
1562 
1563 	if (!ep0->dir_in) {
1564 		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
1565 		return -EINVAL;
1566 	}
1567 
1568 	switch (ctrl->bRequestType & USB_RECIP_MASK) {
1569 	case USB_RECIP_DEVICE:
1570 		/*
1571 		 * bit 0 => self powered
1572 		 * bit 1 => remote wakeup
1573 		 */
1574 		reply = cpu_to_le16(0);
1575 		break;
1576 
1577 	case USB_RECIP_INTERFACE:
1578 		/* currently, the data result should be zero */
1579 		reply = cpu_to_le16(0);
1580 		break;
1581 
1582 	case USB_RECIP_ENDPOINT:
1583 		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
1584 		if (!ep)
1585 			return -ENOENT;
1586 
1587 		reply = cpu_to_le16(ep->halted ? 1 : 0);
1588 		break;
1589 
1590 	default:
1591 		return 0;
1592 	}
1593 
1594 	if (le16_to_cpu(ctrl->wLength) != 2)
1595 		return -EINVAL;
1596 
1597 	ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
1598 	if (ret) {
1599 		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
1600 		return ret;
1601 	}
1602 
1603 	return 1;
1604 }
1605 
1606 static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
1607 
1608 /**
1609  * get_ep_head - return the first request on the endpoint
1610  * @hs_ep: The controller endpoint to get
1611  *
1612  * Get the first request on the endpoint.
1613  */
1614 static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
1615 {
1616 	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
1617 					queue);
1618 }
1619 
1620 /**
1621  * dwc2_gadget_start_next_request - Starts next request from ep queue
1622  * @hs_ep: Endpoint structure
1623  *
1624  * If queue is empty and EP is ISOC-OUT - unmasks OUTTKNEPDIS which is masked
1625  * in its handler. Hence we need to unmask it here to be able to do
1626  * resynchronization.
1627  */
1628 static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
1629 {
1630 	u32 mask;
1631 	struct dwc2_hsotg *hsotg = hs_ep->parent;
1632 	int dir_in = hs_ep->dir_in;
1633 	struct dwc2_hsotg_req *hs_req;
1634 	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
1635 
1636 	if (!list_empty(&hs_ep->queue)) {
1637 		hs_req = get_ep_head(hs_ep);
1638 		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1639 		return;
1640 	}
1641 	if (!hs_ep->isochronous)
1642 		return;
1643 
1644 	if (dir_in) {
1645 		dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
1646 			__func__);
1647 	} else {
1648 		dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
1649 			__func__);
1650 		mask = dwc2_readl(hsotg->regs + epmsk_reg);
1651 		mask |= DOEPMSK_OUTTKNEPDISMSK;
1652 		dwc2_writel(mask, hsotg->regs + epmsk_reg);
1653 	}
1654 }
1655 
1656 /**
1657  * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
1658  * @hsotg: The device state
1659  * @ctrl: USB control request
1660  */
static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
					  struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	struct dwc2_hsotg_req *hs_req;
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	struct dwc2_hsotg_ep *ep;
	int ret;
	bool halted;
	u32 recip;
	u32 wValue;
	u32 wIndex;

	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
		__func__, set ? "SET" : "CLEAR");

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		switch (wValue) {
		case USB_DEVICE_TEST_MODE:
			/*
			 * wIndex: low byte must be zero, high byte carries
			 * the test selector. TEST_MODE can only be set,
			 * never cleared.
			 */
			if ((wIndex & 0xff) != 0)
				return -EINVAL;
			if (!set)
				return -EINVAL;

			/* actual test mode entry is deferred; just latch it */
			hsotg->test_mode = wIndex >> 8;
			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			if (ret) {
				dev_err(hsotg->dev,
					"%s: failed to send reply\n", __func__);
				return ret;
			}
			break;
		default:
			return -ENOENT;
		}
		break;

	case USB_RECIP_ENDPOINT:
		ep = ep_from_windex(hsotg, wIndex);
		if (!ep) {
			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
				__func__, wIndex);
			return -ENOENT;
		}

		switch (wValue) {
		case USB_ENDPOINT_HALT:
			/* remember state before sethalt may change it */
			halted = ep->halted;

			dwc2_hsotg_ep_sethalt(&ep->ep, set, true);

			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			if (ret) {
				dev_err(hsotg->dev,
					"%s: failed to send reply\n", __func__);
				return ret;
			}

			/*
			 * we have to complete all requests for ep if it was
			 * halted, and the halt was cleared by CLEAR_FEATURE
			 */

			if (!set && halted) {
				/*
				 * If we have request in progress,
				 * then complete it
				 */
				if (ep->req) {
					hs_req = ep->req;
					ep->req = NULL;
					list_del_init(&hs_req->queue);
					if (hs_req->req.complete) {
						/*
						 * drop the lock around the
						 * completion callback; it may
						 * re-queue on this endpoint
						 */
						spin_unlock(&hsotg->lock);
						usb_gadget_giveback_request(
							&ep->ep, &hs_req->req);
						spin_lock(&hsotg->lock);
					}
				}

				/* If we have pending request, then start it */
				if (!ep->req)
					dwc2_gadget_start_next_request(ep);
			}

			break;

		default:
			return -ENOENT;
		}
		break;
	default:
		return -ENOENT;
	}
	return 1;
}
1762 
1763 static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
1764 
1765 /**
1766  * dwc2_hsotg_stall_ep0 - stall ep0
1767  * @hsotg: The device state
1768  *
1769  * Set stall for ep0 as response for setup request.
1770  */
1771 static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
1772 {
1773 	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1774 	u32 reg;
1775 	u32 ctrl;
1776 
1777 	dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
1778 	reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
1779 
1780 	/*
1781 	 * DxEPCTL_Stall will be cleared by EP once it has
1782 	 * taken effect, so no need to clear later.
1783 	 */
1784 
1785 	ctrl = dwc2_readl(hsotg->regs + reg);
1786 	ctrl |= DXEPCTL_STALL;
1787 	ctrl |= DXEPCTL_CNAK;
1788 	dwc2_writel(ctrl, hsotg->regs + reg);
1789 
1790 	dev_dbg(hsotg->dev,
1791 		"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
1792 		ctrl, reg, dwc2_readl(hsotg->regs + reg));
1793 
1794 	 /*
1795 	  * complete won't be called, so we enqueue
1796 	  * setup request here
1797 	  */
1798 	 dwc2_hsotg_enqueue_setup(hsotg);
1799 }
1800 
1801 /**
1802  * dwc2_hsotg_process_control - process a control request
1803  * @hsotg: The device state
1804  * @ctrl: The control request received
1805  *
1806  * The controller has received the SETUP phase of a control request, and
1807  * needs to work out what to do next (and whether to pass it on to the
1808  * gadget driver).
1809  */
1810 static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
1811 				       struct usb_ctrlrequest *ctrl)
1812 {
1813 	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1814 	int ret = 0;
1815 	u32 dcfg;
1816 
1817 	dev_dbg(hsotg->dev,
1818 		"ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
1819 		ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
1820 		ctrl->wIndex, ctrl->wLength);
1821 
1822 	if (ctrl->wLength == 0) {
1823 		ep0->dir_in = 1;
1824 		hsotg->ep0_state = DWC2_EP0_STATUS_IN;
1825 	} else if (ctrl->bRequestType & USB_DIR_IN) {
1826 		ep0->dir_in = 1;
1827 		hsotg->ep0_state = DWC2_EP0_DATA_IN;
1828 	} else {
1829 		ep0->dir_in = 0;
1830 		hsotg->ep0_state = DWC2_EP0_DATA_OUT;
1831 	}
1832 
1833 	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1834 		switch (ctrl->bRequest) {
1835 		case USB_REQ_SET_ADDRESS:
1836 			hsotg->connected = 1;
1837 			dcfg = dwc2_readl(hsotg->regs + DCFG);
1838 			dcfg &= ~DCFG_DEVADDR_MASK;
1839 			dcfg |= (le16_to_cpu(ctrl->wValue) <<
1840 				 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
1841 			dwc2_writel(dcfg, hsotg->regs + DCFG);
1842 
1843 			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1844 
1845 			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1846 			return;
1847 
1848 		case USB_REQ_GET_STATUS:
1849 			ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
1850 			break;
1851 
1852 		case USB_REQ_CLEAR_FEATURE:
1853 		case USB_REQ_SET_FEATURE:
1854 			ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
1855 			break;
1856 		}
1857 	}
1858 
1859 	/* as a fallback, try delivering it to the driver to deal with */
1860 
1861 	if (ret == 0 && hsotg->driver) {
1862 		spin_unlock(&hsotg->lock);
1863 		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1864 		spin_lock(&hsotg->lock);
1865 		if (ret < 0)
1866 			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1867 	}
1868 
1869 	/*
1870 	 * the request is either unhandlable, or is not formatted correctly
1871 	 * so respond with a STALL for the status stage to indicate failure.
1872 	 */
1873 
1874 	if (ret < 0)
1875 		dwc2_hsotg_stall_ep0(hsotg);
1876 }
1877 
1878 /**
1879  * dwc2_hsotg_complete_setup - completion of a setup transfer
1880  * @ep: The endpoint the request was on.
1881  * @req: The request completed.
1882  *
1883  * Called on completion of any requests the driver itself submitted for
1884  * EP0 setup packets
1885  */
1886 static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
1887 				      struct usb_request *req)
1888 {
1889 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1890 	struct dwc2_hsotg *hsotg = hs_ep->parent;
1891 
1892 	if (req->status < 0) {
1893 		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
1894 		return;
1895 	}
1896 
1897 	spin_lock(&hsotg->lock);
1898 	if (req->actual == 0)
1899 		dwc2_hsotg_enqueue_setup(hsotg);
1900 	else
1901 		dwc2_hsotg_process_control(hsotg, req->buf);
1902 	spin_unlock(&hsotg->lock);
1903 }
1904 
1905 /**
1906  * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
1907  * @hsotg: The device state.
1908  *
1909  * Enqueue a request on EP0 if necessary to received any SETUP packets
1910  * received from the host.
1911  */
1912 static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
1913 {
1914 	struct usb_request *req = hsotg->ctrl_req;
1915 	struct dwc2_hsotg_req *hs_req = our_req(req);
1916 	int ret;
1917 
1918 	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
1919 
1920 	req->zero = 0;
1921 	req->length = 8;
1922 	req->buf = hsotg->ctrl_buff;
1923 	req->complete = dwc2_hsotg_complete_setup;
1924 
1925 	if (!list_empty(&hs_req->queue)) {
1926 		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
1927 		return;
1928 	}
1929 
1930 	hsotg->eps_out[0]->dir_in = 0;
1931 	hsotg->eps_out[0]->send_zlp = 0;
1932 	hsotg->ep0_state = DWC2_EP0_SETUP;
1933 
1934 	ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
1935 	if (ret < 0) {
1936 		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
1937 		/*
1938 		 * Don't think there's much we can do other than watch the
1939 		 * driver fail.
1940 		 */
1941 	}
1942 }
1943 
/**
 * dwc2_hsotg_program_zlp - program the hardware for a zero-length packet
 * @hsotg: The device state
 * @hs_ep: The endpoint to send/receive the ZLP on
 *
 * Program the endpoint's size register (or DDMA descriptor) for a single
 * zero-length transfer and enable the endpoint.
 */
static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
				   struct dwc2_hsotg_ep *hs_ep)
{
	u32 ctrl;
	u8 index = hs_ep->index;
	u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
	u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);

	if (hs_ep->dir_in)
		dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
			index);
	else
		dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
			index);
	if (using_desc_dma(hsotg)) {
		/* No specific buffer needed for ep0 ZLP */
		dma_addr_t dma = hs_ep->desc_list_dma;

		dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
	} else {
		/* one packet of zero bytes */
		dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
			    DXEPTSIZ_XFERSIZE(0), hsotg->regs +
			    epsiz_reg);
	}

	ctrl = dwc2_readl(hsotg->regs + epctl_reg);
	ctrl |= DXEPCTL_CNAK;  /* clear NAK set by core */
	ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
	ctrl |= DXEPCTL_USBACTEP;
	dwc2_writel(ctrl, hsotg->regs + epctl_reg);
}
1976 
1977 /**
1978  * dwc2_hsotg_complete_request - complete a request given to us
1979  * @hsotg: The device state.
1980  * @hs_ep: The endpoint the request was on.
1981  * @hs_req: The request to complete.
1982  * @result: The result code (0 => Ok, otherwise errno)
1983  *
1984  * The given request has finished, so call the necessary completion
1985  * if it has one and then look to see if we can start a new request
1986  * on the endpoint.
1987  *
1988  * Note, expects the ep to already be locked as appropriate.
1989  */
static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
					struct dwc2_hsotg_ep *hs_ep,
				       struct dwc2_hsotg_req *hs_req,
				       int result)
{
	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
		return;
	}

	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

	/*
	 * only replace the status if we've not already set an error
	 * from a previous transaction
	 */

	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;

	/* undo the DMA mapping / bounce-buffer work done at queue time */
	if (using_dma(hsotg))
		dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);

	/* detach the request from the endpoint before any callback runs */
	hs_ep->req = NULL;
	list_del_init(&hs_req->queue);

	/*
	 * call the complete request with the locks off, just in case the
	 * request tries to queue more work for this endpoint.
	 */

	if (hs_req->req.complete) {
		spin_unlock(&hsotg->lock);
		usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
		spin_lock(&hsotg->lock);
	}

	/* In DDMA don't need to proceed to starting of next ISOC request */
	if (using_desc_dma(hsotg) && hs_ep->isochronous)
		return;

	/*
	 * Look to see if there is anything else to do. Note, the completion
	 * of the previous request may have caused a new request to be started
	 * so be careful when doing this.
	 */

	if (!hs_ep->req && result >= 0)
		dwc2_gadget_start_next_request(hs_ep);
}
2043 
2044 /*
2045  * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
2046  * @hs_ep: The endpoint the request was on.
2047  *
2048  * Get first request from the ep queue, determine descriptor on which complete
2049  * happened. SW based on isoc_chain_num discovers which half of the descriptor
2050  * chain is currently in use by HW, adjusts dma_address and calculates index
2051  * of completed descriptor based on the value of DEPDMA register. Update actual
2052  * length of request, giveback to gadget.
2053  */
static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	struct dwc2_hsotg_req *hs_req;
	struct usb_request *ureq;
	int index;
	dma_addr_t dma_addr;
	u32 dma_reg;
	u32 depdma;
	u32 desc_sts;
	u32 mask;

	hs_req = get_ep_head(hs_ep);
	if (!hs_req) {
		dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
		return;
	}
	ureq = &hs_req->req;

	dma_addr = hs_ep->desc_list_dma;

	/*
	 * If the lower half of the descriptor chain is currently in use
	 * by SW, the higher half is being processed by HW, so shift the
	 * DMA address to the higher half of the descriptor chain.
	 */
	if (!hs_ep->isoc_chain_num)
		dma_addr += sizeof(struct dwc2_dma_desc) *
			    (MAX_DMA_DESC_NUM_GENERIC / 2);

	dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index);
	depdma = dwc2_readl(hsotg->regs + dma_reg);

	/* DEPDMA points past the just-completed descriptor, hence the -1 */
	index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1;
	desc_sts = hs_ep->desc_list[index].status;

	/* remaining-bytes field position differs between IN and OUT */
	mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
	       DEV_DMA_ISOC_RX_NBYTES_MASK;
	ureq->actual = ureq->length -
		       ((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT);

	/* Adjust actual length for ISOC Out if length is not aligned to 4 */
	if (!hs_ep->dir_in && ureq->length & 0x3)
		ureq->actual += 4 - (ureq->length & 0x3);

	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}
2101 
2102 /*
2103  * dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any.
2104  * @hs_ep: The isochronous endpoint to be re-enabled.
2105  *
2106  * If ep has been disabled due to last descriptor servicing (IN endpoint) or
2107  * BNA (OUT endpoint) check the status of other half of descriptor chain that
2108  * was under SW control till HW was busy and restart the endpoint if needed.
2109  */
static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 depctl;
	u32 dma_reg;
	u32 ctrl;
	u32 dma_addr = hs_ep->desc_list_dma;
	unsigned char index = hs_ep->index;

	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);

	ctrl = dwc2_readl(hsotg->regs + depctl);

	/*
	 * EP was disabled if HW has processed last descriptor or BNA was set.
	 * So restart ep if SW has prepared new descriptor chain in ep_queue
	 * routine while HW was busy.
	 */
	if (!(ctrl & DXEPCTL_EPENA)) {
		if (!hs_ep->next_desc) {
			dev_dbg(hsotg->dev, "%s: No more ISOC requests\n",
				__func__);
			return;
		}

		/* Point the core at the half of the chain SW has filled */
		dma_addr += sizeof(struct dwc2_dma_desc) *
			    (MAX_DMA_DESC_NUM_GENERIC / 2) *
			    hs_ep->isoc_chain_num;
		dwc2_writel(dma_addr, hsotg->regs + dma_reg);

		/* Re-enable the endpoint and clear its NAK status */
		ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
		dwc2_writel(ctrl, hsotg->regs + depctl);

		/* Switch ISOC descriptor chain number being processed by SW*/
		hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
		hs_ep->next_desc = 0;

		dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n",
			__func__);
	}
}
2152 
2153 /**
2154  * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
2155  * @hsotg: The device state.
2156  * @ep_idx: The endpoint index for the data
2157  * @size: The size of data in the fifo, in bytes
2158  *
2159  * The FIFO status shows there is data to read from the FIFO for a given
2160  * endpoint, so sort out whether we need to read the data into a request
2161  * that has been made for that endpoint.
2162  */
static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
{
	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
	struct dwc2_hsotg_req *hs_req = hs_ep->req;
	void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
	int to_read;
	int max_req;
	int read_ptr;

	if (!hs_req) {
		u32 epctl = dwc2_readl(hsotg->regs + DOEPCTL(ep_idx));
		int ptr;

		dev_dbg(hsotg->dev,
			"%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
			 __func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do */
		/* each FIFO read pops one 32-bit word, so step by 4 bytes */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)dwc2_readl(fifo);

		return;
	}

	to_read = size;
	read_ptr = hs_req->req.actual;
	max_req = hs_req->req.length - read_ptr;

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	if (to_read > max_req) {
		/*
		 * more data appeared than we where willing
		 * to deal with in this request.
		 */

		/* currently we don't deal this */
		WARN_ON_ONCE(1);
	}

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;
	/* FIFO pops whole 32-bit words; round byte count up to words */
	to_read = DIV_ROUND_UP(to_read, 4);

	/*
	 * note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data.
	 */
	ioread32_rep(fifo, hs_req->req.buf + read_ptr, to_read);
}
2214 
2215 /**
2216  * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
2217  * @hsotg: The device instance
2218  * @dir_in: If IN zlp
2219  *
2220  * Generate a zero-length IN packet request for terminating a SETUP
2221  * transaction.
2222  *
2223  * Note, since we don't write any data to the TxFIFO, then it is
2224  * currently believed that we do not need to wait for any space in
2225  * the TxFIFO.
2226  */
2227 static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
2228 {
2229 	/* eps_out[0] is used in both directions */
2230 	hsotg->eps_out[0]->dir_in = dir_in;
2231 	hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
2232 
2233 	dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
2234 }
2235 
2236 static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
2237 					    u32 epctl_reg)
2238 {
2239 	u32 ctrl;
2240 
2241 	ctrl = dwc2_readl(hsotg->regs + epctl_reg);
2242 	if (ctrl & DXEPCTL_EOFRNUM)
2243 		ctrl |= DXEPCTL_SETEVENFR;
2244 	else
2245 		ctrl |= DXEPCTL_SETODDFR;
2246 	dwc2_writel(ctrl, hsotg->regs + epctl_reg);
2247 }
2248 
2249 /*
2250  * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
 * @hs_ep: The endpoint on which the transfer went
2252  *
2253  * Iterate over endpoints descriptor chain and get info on bytes remained
2254  * in DMA descriptors after transfer has completed. Used for non isoc EPs.
2255  */
2256 static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
2257 {
2258 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2259 	unsigned int bytes_rem = 0;
2260 	struct dwc2_dma_desc *desc = hs_ep->desc_list;
2261 	int i;
2262 	u32 status;
2263 
2264 	if (!desc)
2265 		return -EINVAL;
2266 
2267 	for (i = 0; i < hs_ep->desc_count; ++i) {
2268 		status = desc->status;
2269 		bytes_rem += status & DEV_DMA_NBYTES_MASK;
2270 
2271 		if (status & DEV_DMA_STS_MASK)
2272 			dev_err(hsotg->dev, "descriptor %d closed with %x\n",
2273 				i, status & DEV_DMA_STS_MASK);
2274 	}
2275 
2276 	return bytes_rem;
2277 }
2278 
2279 /**
2280  * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
2281  * @hsotg: The device instance
2282  * @epnum: The endpoint received from
2283  *
2284  * The RXFIFO has delivered an OutDone event, which means that the data
2285  * transfer for an OUT endpoint has been completed, either by a short
2286  * packet or by the finish of a transfer.
2287  */
2288 static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
2289 {
2290 	u32 epsize = dwc2_readl(hsotg->regs + DOEPTSIZ(epnum));
2291 	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
2292 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2293 	struct usb_request *req = &hs_req->req;
2294 	unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2295 	int result = 0;
2296 
2297 	if (!hs_req) {
2298 		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
2299 		return;
2300 	}
2301 
2302 	if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
2303 		dev_dbg(hsotg->dev, "zlp packet received\n");
2304 		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2305 		dwc2_hsotg_enqueue_setup(hsotg);
2306 		return;
2307 	}
2308 
2309 	if (using_desc_dma(hsotg))
2310 		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2311 
2312 	if (using_dma(hsotg)) {
2313 		unsigned int size_done;
2314 
2315 		/*
2316 		 * Calculate the size of the transfer by checking how much
2317 		 * is left in the endpoint size register and then working it
2318 		 * out from the amount we loaded for the transfer.
2319 		 *
2320 		 * We need to do this as DMA pointers are always 32bit aligned
2321 		 * so may overshoot/undershoot the transfer.
2322 		 */
2323 
2324 		size_done = hs_ep->size_loaded - size_left;
2325 		size_done += hs_ep->last_load;
2326 
2327 		req->actual = size_done;
2328 	}
2329 
2330 	/* if there is more request to do, schedule new transfer */
2331 	if (req->actual < req->length && size_left == 0) {
2332 		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2333 		return;
2334 	}
2335 
2336 	if (req->actual < req->length && req->short_not_ok) {
2337 		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
2338 			__func__, req->actual, req->length);
2339 
2340 		/*
2341 		 * todo - what should we return here? there's no one else
2342 		 * even bothering to check the status.
2343 		 */
2344 	}
2345 
2346 	/* DDMA IN status phase will start from StsPhseRcvd interrupt */
2347 	if (!using_desc_dma(hsotg) && epnum == 0 &&
2348 	    hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
2349 		/* Move to STATUS IN */
2350 		dwc2_hsotg_ep0_zlp(hsotg, true);
2351 		return;
2352 	}
2353 
2354 	/*
2355 	 * Slave mode OUT transfers do not go through XferComplete so
2356 	 * adjust the ISOC parity here.
2357 	 */
2358 	if (!using_dma(hsotg)) {
2359 		if (hs_ep->isochronous && hs_ep->interval == 1)
2360 			dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
2361 		else if (hs_ep->isochronous && hs_ep->interval > 1)
2362 			dwc2_gadget_incr_frame_num(hs_ep);
2363 	}
2364 
2365 	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
2366 }
2367 
2368 /**
2369  * dwc2_hsotg_handle_rx - RX FIFO has data
2370  * @hsotg: The device instance
2371  *
2372  * The IRQ handler has detected that the RX FIFO has some data in it
2373  * that requires processing, so find out what is in there and do the
2374  * appropriate read.
2375  *
2376  * The RXFIFO is a true FIFO, the packets coming out are still in packet
2377  * chunks, so if you have x packets received on an endpoint you'll get x
2378  * FIFO events delivered, each with a packet's worth of data in it.
2379  *
2380  * When using DMA, we should not be processing events from the RXFIFO
2381  * as the actual data should be sent to the memory directly and we turn
2382  * on the completion interrupts to get notifications of transfer completion.
2383  */
2384 static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
2385 {
2386 	u32 grxstsr = dwc2_readl(hsotg->regs + GRXSTSP);
2387 	u32 epnum, status, size;
2388 
2389 	WARN_ON(using_dma(hsotg));
2390 
2391 	epnum = grxstsr & GRXSTS_EPNUM_MASK;
2392 	status = grxstsr & GRXSTS_PKTSTS_MASK;
2393 
2394 	size = grxstsr & GRXSTS_BYTECNT_MASK;
2395 	size >>= GRXSTS_BYTECNT_SHIFT;
2396 
2397 	dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
2398 		__func__, grxstsr, size, epnum);
2399 
2400 	switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
2401 	case GRXSTS_PKTSTS_GLOBALOUTNAK:
2402 		dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
2403 		break;
2404 
2405 	case GRXSTS_PKTSTS_OUTDONE:
2406 		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
2407 			dwc2_hsotg_read_frameno(hsotg));
2408 
2409 		if (!using_dma(hsotg))
2410 			dwc2_hsotg_handle_outdone(hsotg, epnum);
2411 		break;
2412 
2413 	case GRXSTS_PKTSTS_SETUPDONE:
2414 		dev_dbg(hsotg->dev,
2415 			"SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
2416 			dwc2_hsotg_read_frameno(hsotg),
2417 			dwc2_readl(hsotg->regs + DOEPCTL(0)));
2418 		/*
2419 		 * Call dwc2_hsotg_handle_outdone here if it was not called from
2420 		 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
2421 		 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
2422 		 */
2423 		if (hsotg->ep0_state == DWC2_EP0_SETUP)
2424 			dwc2_hsotg_handle_outdone(hsotg, epnum);
2425 		break;
2426 
2427 	case GRXSTS_PKTSTS_OUTRX:
2428 		dwc2_hsotg_rx_data(hsotg, epnum, size);
2429 		break;
2430 
2431 	case GRXSTS_PKTSTS_SETUPRX:
2432 		dev_dbg(hsotg->dev,
2433 			"SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
2434 			dwc2_hsotg_read_frameno(hsotg),
2435 			dwc2_readl(hsotg->regs + DOEPCTL(0)));
2436 
2437 		WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
2438 
2439 		dwc2_hsotg_rx_data(hsotg, epnum, size);
2440 		break;
2441 
2442 	default:
2443 		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
2444 			 __func__, grxstsr);
2445 
2446 		dwc2_hsotg_dump(hsotg);
2447 		break;
2448 	}
2449 }
2450 
2451 /**
2452  * dwc2_hsotg_ep0_mps - turn max packet size into register setting
2453  * @mps: The maximum packet size in bytes.
2454  */
2455 static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
2456 {
2457 	switch (mps) {
2458 	case 64:
2459 		return D0EPCTL_MPS_64;
2460 	case 32:
2461 		return D0EPCTL_MPS_32;
2462 	case 16:
2463 		return D0EPCTL_MPS_16;
2464 	case 8:
2465 		return D0EPCTL_MPS_8;
2466 	}
2467 
2468 	/* bad max packet size, warn and return invalid result */
2469 	WARN_ON(1);
2470 	return (u32)-1;
2471 }
2472 
2473 /**
2474  * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
2475  * @hsotg: The driver state.
2476  * @ep: The index number of the endpoint
2477  * @mps: The maximum packet size in bytes
2478  * @mc: The multicount value
2479  *
2480  * Configure the maximum packet size for the given endpoint, updating
2481  * the hardware control registers to reflect this.
2482  */
2483 static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
2484 					unsigned int ep, unsigned int mps,
2485 					unsigned int mc, unsigned int dir_in)
2486 {
2487 	struct dwc2_hsotg_ep *hs_ep;
2488 	void __iomem *regs = hsotg->regs;
2489 	u32 reg;
2490 
2491 	hs_ep = index_to_ep(hsotg, ep, dir_in);
2492 	if (!hs_ep)
2493 		return;
2494 
2495 	if (ep == 0) {
2496 		u32 mps_bytes = mps;
2497 
2498 		/* EP0 is a special case */
2499 		mps = dwc2_hsotg_ep0_mps(mps_bytes);
2500 		if (mps > 3)
2501 			goto bad_mps;
2502 		hs_ep->ep.maxpacket = mps_bytes;
2503 		hs_ep->mc = 1;
2504 	} else {
2505 		if (mps > 1024)
2506 			goto bad_mps;
2507 		hs_ep->mc = mc;
2508 		if (mc > 3)
2509 			goto bad_mps;
2510 		hs_ep->ep.maxpacket = mps;
2511 	}
2512 
2513 	if (dir_in) {
2514 		reg = dwc2_readl(regs + DIEPCTL(ep));
2515 		reg &= ~DXEPCTL_MPS_MASK;
2516 		reg |= mps;
2517 		dwc2_writel(reg, regs + DIEPCTL(ep));
2518 	} else {
2519 		reg = dwc2_readl(regs + DOEPCTL(ep));
2520 		reg &= ~DXEPCTL_MPS_MASK;
2521 		reg |= mps;
2522 		dwc2_writel(reg, regs + DOEPCTL(ep));
2523 	}
2524 
2525 	return;
2526 
2527 bad_mps:
2528 	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
2529 }
2530 
2531 /**
2532  * dwc2_hsotg_txfifo_flush - flush Tx FIFO
2533  * @hsotg: The driver state
2534  * @idx: The index for the endpoint (0..15)
2535  */
2536 static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
2537 {
2538 	int timeout;
2539 	int val;
2540 
2541 	dwc2_writel(GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
2542 		    hsotg->regs + GRSTCTL);
2543 
2544 	/* wait until the fifo is flushed */
2545 	timeout = 100;
2546 
2547 	while (1) {
2548 		val = dwc2_readl(hsotg->regs + GRSTCTL);
2549 
2550 		if ((val & (GRSTCTL_TXFFLSH)) == 0)
2551 			break;
2552 
2553 		if (--timeout == 0) {
2554 			dev_err(hsotg->dev,
2555 				"%s: timeout flushing fifo (GRSTCTL=%08x)\n",
2556 				__func__, val);
2557 			break;
2558 		}
2559 
2560 		udelay(1);
2561 	}
2562 }
2563 
2564 /**
2565  * dwc2_hsotg_trytx - check to see if anything needs transmitting
2566  * @hsotg: The driver state
2567  * @hs_ep: The driver endpoint to check.
2568  *
2569  * Check to see if there is a request that has data to send, and if so
2570  * make an attempt to write data into the FIFO.
2571  */
2572 static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
2573 			    struct dwc2_hsotg_ep *hs_ep)
2574 {
2575 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2576 
2577 	if (!hs_ep->dir_in || !hs_req) {
2578 		/**
2579 		 * if request is not enqueued, we disable interrupts
2580 		 * for endpoints, excepting ep0
2581 		 */
2582 		if (hs_ep->index != 0)
2583 			dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
2584 					      hs_ep->dir_in, 0);
2585 		return 0;
2586 	}
2587 
2588 	if (hs_req->req.actual < hs_req->req.length) {
2589 		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
2590 			hs_ep->index);
2591 		return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
2592 	}
2593 
2594 	return 0;
2595 }
2596 
2597 /**
2598  * dwc2_hsotg_complete_in - complete IN transfer
2599  * @hsotg: The device state.
2600  * @hs_ep: The endpoint that has just completed.
2601  *
2602  * An IN transfer has been completed, update the transfer's state and then
2603  * call the relevant completion routines.
2604  */
static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
				   struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = dwc2_readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Finish ZLP handling for IN EP0 transactions */
	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
		dev_dbg(hsotg->dev, "zlp packet sent\n");

		/*
		 * While send zlp for DWC2_EP0_STATUS_IN EP direction was
		 * changed to IN. Change back to complete OUT transfer request
		 */
		hs_ep->dir_in = 0;

		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
		/* Test mode (from SET_FEATURE) takes effect after STATUS */
		if (hsotg->test_mode) {
			int ret;

			ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
			if (ret < 0) {
				dev_dbg(hsotg->dev, "Invalid Test #%d\n",
					hsotg->test_mode);
				dwc2_hsotg_stall_ep0(hsotg);
				return;
			}
		}
		dwc2_hsotg_enqueue_setup(hsotg);
		return;
	}

	/*
	 * Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */
	if (using_desc_dma(hsotg)) {
		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
		if (size_left < 0)
			dev_err(hsotg->dev, "error parsing DDMA results %d\n",
				size_left);
	} else {
		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
	}

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;
	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);

	/* Transfer drained but request unfinished: kick off the next chunk */
	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
		return;
	}

	/* Zlp for all endpoints, for ep0 only in DATA IN stage */
	if (hs_ep->send_zlp) {
		dwc2_hsotg_program_zlp(hsotg, hs_ep);
		hs_ep->send_zlp = 0;
		/* transfer will be completed on next complete interrupt */
		return;
	}

	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
		/* Move to STATUS OUT */
		dwc2_hsotg_ep0_zlp(hsotg, false);
		return;
	}

	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}
2694 
2695 /**
2696  * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
2697  * @hsotg: The device state.
2698  * @idx: Index of ep.
2699  * @dir_in: Endpoint direction 1-in 0-out.
2700  *
2701  * Reads for endpoint with given index and direction, by masking
 * epint_reg with the corresponding mask.
2703  */
2704 static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
2705 					  unsigned int idx, int dir_in)
2706 {
2707 	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
2708 	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
2709 	u32 ints;
2710 	u32 mask;
2711 	u32 diepempmsk;
2712 
2713 	mask = dwc2_readl(hsotg->regs + epmsk_reg);
2714 	diepempmsk = dwc2_readl(hsotg->regs + DIEPEMPMSK);
2715 	mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
2716 	mask |= DXEPINT_SETUP_RCVD;
2717 
2718 	ints = dwc2_readl(hsotg->regs + epint_reg);
2719 	ints &= mask;
2720 	return ints;
2721 }
2722 
2723 /**
2724  * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
2725  * @hs_ep: The endpoint on which interrupt is asserted.
2726  *
2727  * This interrupt indicates that the endpoint has been disabled per the
2728  * application's request.
2729  *
2730  * For IN endpoints flushes txfifo, in case of BULK clears DCTL_CGNPINNAK,
2731  * in case of ISOC completes current request.
2732  *
2733  * For ISOC-OUT endpoints completes expired requests. If there is remaining
2734  * request starts it.
2735  */
2736 static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
2737 {
2738 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2739 	struct dwc2_hsotg_req *hs_req;
2740 	unsigned char idx = hs_ep->index;
2741 	int dir_in = hs_ep->dir_in;
2742 	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
2743 	int dctl = dwc2_readl(hsotg->regs + DCTL);
2744 
2745 	dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
2746 
2747 	if (dir_in) {
2748 		int epctl = dwc2_readl(hsotg->regs + epctl_reg);
2749 
2750 		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
2751 
2752 		if (hs_ep->isochronous) {
2753 			dwc2_hsotg_complete_in(hsotg, hs_ep);
2754 			return;
2755 		}
2756 
2757 		if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
2758 			int dctl = dwc2_readl(hsotg->regs + DCTL);
2759 
2760 			dctl |= DCTL_CGNPINNAK;
2761 			dwc2_writel(dctl, hsotg->regs + DCTL);
2762 		}
2763 		return;
2764 	}
2765 
2766 	if (dctl & DCTL_GOUTNAKSTS) {
2767 		dctl |= DCTL_CGOUTNAK;
2768 		dwc2_writel(dctl, hsotg->regs + DCTL);
2769 	}
2770 
2771 	if (!hs_ep->isochronous)
2772 		return;
2773 
2774 	if (list_empty(&hs_ep->queue)) {
2775 		dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
2776 			__func__, hs_ep);
2777 		return;
2778 	}
2779 
2780 	do {
2781 		hs_req = get_ep_head(hs_ep);
2782 		if (hs_req)
2783 			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2784 						    -ENODATA);
2785 		dwc2_gadget_incr_frame_num(hs_ep);
2786 	} while (dwc2_gadget_target_frame_elapsed(hs_ep));
2787 
2788 	dwc2_gadget_start_next_request(hs_ep);
2789 }
2790 
2791 /**
2792  * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
2793  * @hs_ep: The endpoint on which interrupt is asserted.
2794  *
2795  * This is starting point for ISOC-OUT transfer, synchronization done with
2796  * first out token received from host while corresponding EP is disabled.
2797  *
2798  * Device does not know initial frame in which out token will come. For this
2799  * HW generates OUTTKNEPDIS - out token is received while EP is disabled. Upon
2800  * getting this interrupt SW starts calculation for next transfer frame.
2801  */
2802 static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
2803 {
2804 	struct dwc2_hsotg *hsotg = ep->parent;
2805 	int dir_in = ep->dir_in;
2806 	u32 doepmsk;
2807 	u32 tmp;
2808 
2809 	if (dir_in || !ep->isochronous)
2810 		return;
2811 
2812 	/*
2813 	 * Store frame in which irq was asserted here, as
2814 	 * it can change while completing request below.
2815 	 */
2816 	tmp = dwc2_hsotg_read_frameno(hsotg);
2817 
2818 	dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
2819 
2820 	if (using_desc_dma(hsotg)) {
2821 		if (ep->target_frame == TARGET_FRAME_INITIAL) {
2822 			/* Start first ISO Out */
2823 			ep->target_frame = tmp;
2824 			dwc2_gadget_start_isoc_ddma(ep);
2825 		}
2826 		return;
2827 	}
2828 
2829 	if (ep->interval > 1 &&
2830 	    ep->target_frame == TARGET_FRAME_INITIAL) {
2831 		u32 dsts;
2832 		u32 ctrl;
2833 
2834 		dsts = dwc2_readl(hsotg->regs + DSTS);
2835 		ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
2836 		dwc2_gadget_incr_frame_num(ep);
2837 
2838 		ctrl = dwc2_readl(hsotg->regs + DOEPCTL(ep->index));
2839 		if (ep->target_frame & 0x1)
2840 			ctrl |= DXEPCTL_SETODDFR;
2841 		else
2842 			ctrl |= DXEPCTL_SETEVENFR;
2843 
2844 		dwc2_writel(ctrl, hsotg->regs + DOEPCTL(ep->index));
2845 	}
2846 
2847 	dwc2_gadget_start_next_request(ep);
2848 	doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
2849 	doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
2850 	dwc2_writel(doepmsk, hsotg->regs + DOEPMSK);
2851 }
2852 
2853 /**
2854  * dwc2_gadget_handle_nak - handle NAK interrupt
2855  * @hs_ep: The endpoint on which interrupt is asserted.
2856  *
2857  * This is starting point for ISOC-IN transfer, synchronization done with
2858  * first IN token received from host while corresponding EP is disabled.
2859  *
2860  * Device does not know when first one token will arrive from host. On first
2861  * token arrival HW generates 2 interrupts: 'in token received while FIFO empty'
2862  * and 'NAK'. NAK interrupt for ISOC-IN means that token has arrived and ZLP was
2863  * sent in response to that as there was no data in FIFO. SW is basing on this
2864  * interrupt to obtain frame in which token has come and then based on the
2865  * interval calculates next frame for transfer.
2866  */
static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;

	/* Only meaningful for ISOC-IN endpoints */
	if (!dir_in || !hs_ep->isochronous)
		return;

	if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
		/* First IN token seen: anchor scheduling to this frame */
		hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);

		if (using_desc_dma(hsotg)) {
			dwc2_gadget_start_isoc_ddma(hs_ep);
			return;
		}

		/* Program frame parity to match the anchored target frame */
		if (hs_ep->interval > 1) {
			u32 ctrl = dwc2_readl(hsotg->regs +
					      DIEPCTL(hs_ep->index));
			if (hs_ep->target_frame & 0x1)
				ctrl |= DXEPCTL_SETODDFR;
			else
				ctrl |= DXEPCTL_SETEVENFR;

			dwc2_writel(ctrl, hsotg->regs + DIEPCTL(hs_ep->index));
		}

		/* Retire the head request (ZLP was sent in its place) */
		dwc2_hsotg_complete_request(hsotg, hs_ep,
					    get_ep_head(hs_ep), 0);
	}

	dwc2_gadget_incr_frame_num(hs_ep);
}
2900 
2901 /**
2902  * dwc2_hsotg_epint - handle an in/out endpoint interrupt
2903  * @hsotg: The driver state
2904  * @idx: The index for the endpoint (0..15)
2905  * @dir_in: Set if this is an IN endpoint
2906  *
2907  * Process and clear any interrupt pending for an individual endpoint
2908  */
static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
			     int dir_in)
{
	struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
	u32 ints;
	u32 ctrl;

	ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
	/* NOTE(review): ctrl is assigned here but never used below —
	 * candidate for removal. */
	ctrl = dwc2_readl(hsotg->regs + epctl_reg);

	/* Clear endpoint interrupts */
	dwc2_writel(ints, hsotg->regs + epint_reg);

	if (!hs_ep) {
		dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
			__func__, idx, dir_in ? "in" : "out");
		return;
	}

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	/* Don't process XferCompl interrupt if it is a setup packet */
	if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
		ints &= ~DXEPINT_XFERCOMPL;

	/*
	 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
	 * stage and xfercomplete was generated without SETUP phase done
	 * interrupt. SW should parse received setup packet only after host's
	 * exit from setup phase of control transfer.
	 */
	if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
	    hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
		ints &= ~DXEPINT_XFERCOMPL;

	if (ints & DXEPINT_XFERCOMPL) {
		dev_dbg(hsotg->dev,
			"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
			__func__, dwc2_readl(hsotg->regs + epctl_reg),
			dwc2_readl(hsotg->regs + epsiz_reg));

		/* In DDMA handle isochronous requests separately */
		if (using_desc_dma(hsotg) && hs_ep->isochronous) {
			dwc2_gadget_complete_isoc_request_ddma(hs_ep);
			/* Try to start next isoc request */
			dwc2_gadget_start_next_isoc_ddma(hs_ep);
		} else if (dir_in) {
			/*
			 * We get OutDone from the FIFO, so we only
			 * need to look at completing IN requests here
			 * if operating slave mode
			 */
			if (hs_ep->isochronous && hs_ep->interval > 1)
				dwc2_gadget_incr_frame_num(hs_ep);

			dwc2_hsotg_complete_in(hsotg, hs_ep);
			/* NAK already served its purpose for this transfer */
			if (ints & DXEPINT_NAKINTRPT)
				ints &= ~DXEPINT_NAKINTRPT;

			/* EP0 idle again: re-arm for the next SETUP packet */
			if (idx == 0 && !hs_ep->req)
				dwc2_hsotg_enqueue_setup(hsotg);
		} else if (using_dma(hsotg)) {
			/*
			 * We're using DMA, we need to fire an OutDone here
			 * as we ignore the RXFIFO.
			 */
			if (hs_ep->isochronous && hs_ep->interval > 1)
				dwc2_gadget_incr_frame_num(hs_ep);

			dwc2_hsotg_handle_outdone(hsotg, idx);
		}
	}

	if (ints & DXEPINT_EPDISBLD)
		dwc2_gadget_handle_ep_disabled(hs_ep);

	if (ints & DXEPINT_OUTTKNEPDIS)
		dwc2_gadget_handle_out_token_ep_disabled(hs_ep);

	if (ints & DXEPINT_NAKINTRPT)
		dwc2_gadget_handle_nak(hs_ep);

	if (ints & DXEPINT_AHBERR)
		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);

	if (ints & DXEPINT_SETUP) {  /* Setup or Timeout */
		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);

		if (using_dma(hsotg) && idx == 0) {
			/*
			 * this is the notification we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO, instead we need to process
			 * the setup here.
			 */

			if (dir_in)
				WARN_ON_ONCE(1);
			else
				dwc2_hsotg_handle_outdone(hsotg, 0);
		}
	}

	if (ints & DXEPINT_STSPHSERCVD) {
		dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);

		/* Move to STATUS IN for DDMA */
		if (using_desc_dma(hsotg))
			dwc2_hsotg_ep0_zlp(hsotg, true);
	}

	if (ints & DXEPINT_BACK2BACKSETUP)
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);

	if (ints & DXEPINT_BNAINTR) {
		dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);

		/*
		 * Try to start next isoc request, if any.
		 * Sometimes the endpoint remains enabled after BNA interrupt
		 * assertion, which is not expected, hence we can enter here
		 * couple of times.
		 */
		if (hs_ep->isochronous)
			dwc2_gadget_start_next_isoc_ddma(hs_ep);
	}

	if (dir_in && !hs_ep->isochronous) {
		/* not sure if this is important, but we'll clear it anyway */
		if (ints & DXEPINT_INTKNTXFEMP) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
		}

		/* this probably means something bad is happening */
		if (ints & DXEPINT_INTKNEPMIS) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
		}

		/* FIFO has space or is empty (see GAHBCFG) */
		if (hsotg->dedicated_fifos &&
		    ints & DXEPINT_TXFEMP) {
			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
				__func__, idx);
			if (!using_dma(hsotg))
				dwc2_hsotg_trytx(hsotg, hs_ep);
		}
	}
}
3063 
3064 /**
3065  * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
3066  * @hsotg: The device state.
3067  *
3068  * Handle updating the device settings after the enumeration phase has
3069  * been completed.
3070  */
3071 static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
3072 {
3073 	u32 dsts = dwc2_readl(hsotg->regs + DSTS);
3074 	int ep0_mps = 0, ep_mps = 8;
3075 
3076 	/*
3077 	 * This should signal the finish of the enumeration phase
3078 	 * of the USB handshaking, so we should now know what rate
3079 	 * we connected at.
3080 	 */
3081 
3082 	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
3083 
3084 	/*
3085 	 * note, since we're limited by the size of transfer on EP0, and
3086 	 * it seems IN transfers must be a even number of packets we do
3087 	 * not advertise a 64byte MPS on EP0.
3088 	 */
3089 
3090 	/* catch both EnumSpd_FS and EnumSpd_FS48 */
3091 	switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
3092 	case DSTS_ENUMSPD_FS:
3093 	case DSTS_ENUMSPD_FS48:
3094 		hsotg->gadget.speed = USB_SPEED_FULL;
3095 		ep0_mps = EP0_MPS_LIMIT;
3096 		ep_mps = 1023;
3097 		break;
3098 
3099 	case DSTS_ENUMSPD_HS:
3100 		hsotg->gadget.speed = USB_SPEED_HIGH;
3101 		ep0_mps = EP0_MPS_LIMIT;
3102 		ep_mps = 1024;
3103 		break;
3104 
3105 	case DSTS_ENUMSPD_LS:
3106 		hsotg->gadget.speed = USB_SPEED_LOW;
3107 		ep0_mps = 8;
3108 		ep_mps = 8;
3109 		/*
3110 		 * note, we don't actually support LS in this driver at the
3111 		 * moment, and the documentation seems to imply that it isn't
3112 		 * supported by the PHYs on some of the devices.
3113 		 */
3114 		break;
3115 	}
3116 	dev_info(hsotg->dev, "new device is %s\n",
3117 		 usb_speed_string(hsotg->gadget.speed));
3118 
3119 	/*
3120 	 * we should now know the maximum packet size for an
3121 	 * endpoint, so set the endpoints to a default value.
3122 	 */
3123 
3124 	if (ep0_mps) {
3125 		int i;
3126 		/* Initialize ep0 for both in and out directions */
3127 		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
3128 		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
3129 		for (i = 1; i < hsotg->num_of_eps; i++) {
3130 			if (hsotg->eps_in[i])
3131 				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3132 							    0, 1);
3133 			if (hsotg->eps_out[i])
3134 				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3135 							    0, 0);
3136 		}
3137 	}
3138 
3139 	/* ensure after enumeration our EP0 is active */
3140 
3141 	dwc2_hsotg_enqueue_setup(hsotg);
3142 
3143 	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3144 		dwc2_readl(hsotg->regs + DIEPCTL0),
3145 		dwc2_readl(hsotg->regs + DOEPCTL0));
3146 }
3147 
3148 /**
3149  * kill_all_requests - remove all requests from the endpoint's queue
3150  * @hsotg: The device state.
3151  * @ep: The endpoint the requests may be on.
3152  * @result: The result code to use.
3153  *
3154  * Go through the requests on the given endpoint and mark them
3155  * completed with the given result code.
3156  */
3157 static void kill_all_requests(struct dwc2_hsotg *hsotg,
3158 			      struct dwc2_hsotg_ep *ep,
3159 			      int result)
3160 {
3161 	struct dwc2_hsotg_req *req, *treq;
3162 	unsigned int size;
3163 
3164 	ep->req = NULL;
3165 
3166 	list_for_each_entry_safe(req, treq, &ep->queue, queue)
3167 		dwc2_hsotg_complete_request(hsotg, ep, req,
3168 					    result);
3169 
3170 	if (!hsotg->dedicated_fifos)
3171 		return;
3172 	size = (dwc2_readl(hsotg->regs + DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
3173 	if (size < ep->fifo_size)
3174 		dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
3175 }
3176 
3177 /**
3178  * dwc2_hsotg_disconnect - disconnect service
3179  * @hsotg: The device state.
3180  *
3181  * The device has been disconnected. Remove all current
3182  * transactions and signal the gadget driver that this
3183  * has happened.
3184  */
3185 void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
3186 {
3187 	unsigned int ep;
3188 
3189 	if (!hsotg->connected)
3190 		return;
3191 
3192 	hsotg->connected = 0;
3193 	hsotg->test_mode = 0;
3194 
3195 	for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3196 		if (hsotg->eps_in[ep])
3197 			kill_all_requests(hsotg, hsotg->eps_in[ep],
3198 					  -ESHUTDOWN);
3199 		if (hsotg->eps_out[ep])
3200 			kill_all_requests(hsotg, hsotg->eps_out[ep],
3201 					  -ESHUTDOWN);
3202 	}
3203 
3204 	call_gadget(hsotg, disconnect);
3205 	hsotg->lx_state = DWC2_L3;
3206 
3207 	usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
3208 }
3209 
3210 /**
3211  * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
3212  * @hsotg: The device state:
3213  * @periodic: True if this is a periodic FIFO interrupt
3214  */
3215 static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
3216 {
3217 	struct dwc2_hsotg_ep *ep;
3218 	int epno, ret;
3219 
3220 	/* look through for any more data to transmit */
3221 	for (epno = 0; epno < hsotg->num_of_eps; epno++) {
3222 		ep = index_to_ep(hsotg, epno, 1);
3223 
3224 		if (!ep)
3225 			continue;
3226 
3227 		if (!ep->dir_in)
3228 			continue;
3229 
3230 		if ((periodic && !ep->periodic) ||
3231 		    (!periodic && ep->periodic))
3232 			continue;
3233 
3234 		ret = dwc2_hsotg_trytx(hsotg, ep);
3235 		if (ret < 0)
3236 			break;
3237 	}
3238 }
3239 
/*
 * IRQ flags which will trigger a retry around the IRQ loop: these are
 * FIFO-level events which may still be asserted after being handled,
 * so the interrupt handler re-reads GINTSTS when any of them was seen.
 */
#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
			GINTSTS_PTXFEMP |  \
			GINTSTS_RXFLVL)
3244 
3245 /**
3246  * dwc2_hsotg_core_init - issue softreset to the core
3247  * @hsotg: The device state
3248  *
3249  * Issue a soft reset to the core, and await the core finishing it.
3250  */
3251 void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3252 				       bool is_usb_reset)
3253 {
3254 	u32 intmsk;
3255 	u32 val;
3256 	u32 usbcfg;
3257 	u32 dcfg = 0;
3258 
3259 	/* Kill any ep0 requests as controller will be reinitialized */
3260 	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
3261 
3262 	if (!is_usb_reset)
3263 		if (dwc2_core_reset(hsotg, true))
3264 			return;
3265 
3266 	/*
3267 	 * we must now enable ep0 ready for host detection and then
3268 	 * set configuration.
3269 	 */
3270 
3271 	/* keep other bits untouched (so e.g. forced modes are not lost) */
3272 	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3273 	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3274 		GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
3275 
3276 	if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
3277 	    (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
3278 	     hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) {
3279 		/* FS/LS Dedicated Transceiver Interface */
3280 		usbcfg |= GUSBCFG_PHYSEL;
3281 	} else {
3282 		/* set the PLL on, remove the HNP/SRP and set the PHY */
3283 		val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
3284 		usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
3285 			(val << GUSBCFG_USBTRDTIM_SHIFT);
3286 	}
3287 	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
3288 
3289 	dwc2_hsotg_init_fifo(hsotg);
3290 
3291 	if (!is_usb_reset)
3292 		__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
3293 
3294 	dcfg |= DCFG_EPMISCNT(1);
3295 
3296 	switch (hsotg->params.speed) {
3297 	case DWC2_SPEED_PARAM_LOW:
3298 		dcfg |= DCFG_DEVSPD_LS;
3299 		break;
3300 	case DWC2_SPEED_PARAM_FULL:
3301 		if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
3302 			dcfg |= DCFG_DEVSPD_FS48;
3303 		else
3304 			dcfg |= DCFG_DEVSPD_FS;
3305 		break;
3306 	default:
3307 		dcfg |= DCFG_DEVSPD_HS;
3308 	}
3309 
3310 	dwc2_writel(dcfg,  hsotg->regs + DCFG);
3311 
3312 	/* Clear any pending OTG interrupts */
3313 	dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
3314 
3315 	/* Clear any pending interrupts */
3316 	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
3317 	intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
3318 		GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
3319 		GINTSTS_USBRST | GINTSTS_RESETDET |
3320 		GINTSTS_ENUMDONE | GINTSTS_OTGINT |
3321 		GINTSTS_USBSUSP | GINTSTS_WKUPINT;
3322 
3323 	if (!using_desc_dma(hsotg))
3324 		intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
3325 
3326 	if (!hsotg->params.external_id_pin_ctl)
3327 		intmsk |= GINTSTS_CONIDSTSCHNG;
3328 
3329 	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
3330 
3331 	if (using_dma(hsotg)) {
3332 		dwc2_writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
3333 			    (GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT),
3334 			    hsotg->regs + GAHBCFG);
3335 
3336 		/* Set DDMA mode support in the core if needed */
3337 		if (using_desc_dma(hsotg))
3338 			__orr32(hsotg->regs + DCFG, DCFG_DESCDMA_EN);
3339 
3340 	} else {
3341 		dwc2_writel(((hsotg->dedicated_fifos) ?
3342 						(GAHBCFG_NP_TXF_EMP_LVL |
3343 						 GAHBCFG_P_TXF_EMP_LVL) : 0) |
3344 			    GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG);
3345 	}
3346 
3347 	/*
3348 	 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
3349 	 * when we have no data to transfer. Otherwise we get being flooded by
3350 	 * interrupts.
3351 	 */
3352 
3353 	dwc2_writel(((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
3354 		DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
3355 		DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
3356 		DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
3357 		hsotg->regs + DIEPMSK);
3358 
3359 	/*
3360 	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
3361 	 * DMA mode we may need this and StsPhseRcvd.
3362 	 */
3363 	dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
3364 		DOEPMSK_STSPHSERCVDMSK) : 0) |
3365 		DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
3366 		DOEPMSK_SETUPMSK,
3367 		hsotg->regs + DOEPMSK);
3368 
3369 	/* Enable BNA interrupt for DDMA */
3370 	if (using_desc_dma(hsotg))
3371 		__orr32(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK);
3372 
3373 	dwc2_writel(0, hsotg->regs + DAINTMSK);
3374 
3375 	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3376 		dwc2_readl(hsotg->regs + DIEPCTL0),
3377 		dwc2_readl(hsotg->regs + DOEPCTL0));
3378 
3379 	/* enable in and out endpoint interrupts */
3380 	dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
3381 
3382 	/*
3383 	 * Enable the RXFIFO when in slave mode, as this is how we collect
3384 	 * the data. In DMA mode, we get events from the FIFO but also
3385 	 * things we cannot process, so do not use it.
3386 	 */
3387 	if (!using_dma(hsotg))
3388 		dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
3389 
3390 	/* Enable interrupts for EP0 in and out */
3391 	dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
3392 	dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
3393 
3394 	if (!is_usb_reset) {
3395 		__orr32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
3396 		udelay(10);  /* see openiboot */
3397 		__bic32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
3398 	}
3399 
3400 	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg->regs + DCTL));
3401 
3402 	/*
3403 	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
3404 	 * writing to the EPCTL register..
3405 	 */
3406 
3407 	/* set to read 1 8byte packet */
3408 	dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
3409 	       DXEPTSIZ_XFERSIZE(8), hsotg->regs + DOEPTSIZ0);
3410 
3411 	dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3412 	       DXEPCTL_CNAK | DXEPCTL_EPENA |
3413 	       DXEPCTL_USBACTEP,
3414 	       hsotg->regs + DOEPCTL0);
3415 
3416 	/* enable, but don't activate EP0in */
3417 	dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3418 	       DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
3419 
3420 	dwc2_hsotg_enqueue_setup(hsotg);
3421 
3422 	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3423 		dwc2_readl(hsotg->regs + DIEPCTL0),
3424 		dwc2_readl(hsotg->regs + DOEPCTL0));
3425 
3426 	/* clear global NAKs */
3427 	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
3428 	if (!is_usb_reset)
3429 		val |= DCTL_SFTDISCON;
3430 	__orr32(hsotg->regs + DCTL, val);
3431 
3432 	/* must be at-least 3ms to allow bus to see disconnect */
3433 	mdelay(3);
3434 
3435 	hsotg->lx_state = DWC2_L0;
3436 }
3437 
3438 static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
3439 {
3440 	/* set the soft-disconnect bit */
3441 	__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
3442 }
3443 
3444 void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
3445 {
3446 	/* remove the soft-disconnect and let's go */
3447 	__bic32(hsotg->regs + DCTL, DCTL_SFTDISCON);
3448 }
3449 
3450 /**
3451  * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
3452  * @hsotg: The device state:
3453  *
3454  * This interrupt indicates one of the following conditions occurred while
3455  * transmitting an ISOC transaction.
3456  * - Corrupted IN Token for ISOC EP.
3457  * - Packet not complete in FIFO.
3458  *
3459  * The following actions will be taken:
3460  * - Determine the EP
3461  * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
3462  */
3463 static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
3464 {
3465 	struct dwc2_hsotg_ep *hs_ep;
3466 	u32 epctrl;
3467 	u32 idx;
3468 
3469 	dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
3470 
3471 	for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
3472 		hs_ep = hsotg->eps_in[idx];
3473 		epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
3474 		if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
3475 		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
3476 			epctrl |= DXEPCTL_SNAK;
3477 			epctrl |= DXEPCTL_EPDIS;
3478 			dwc2_writel(epctrl, hsotg->regs + DIEPCTL(idx));
3479 		}
3480 	}
3481 
3482 	/* Clear interrupt */
3483 	dwc2_writel(GINTSTS_INCOMPL_SOIN, hsotg->regs + GINTSTS);
3484 }
3485 
3486 /**
3487  * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
3488  * @hsotg: The device state:
3489  *
3490  * This interrupt indicates one of the following conditions occurred while
3491  * transmitting an ISOC transaction.
3492  * - Corrupted OUT Token for ISOC EP.
3493  * - Packet not complete in FIFO.
3494  *
3495  * The following actions will be taken:
3496  * - Determine the EP
3497  * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
3498  */
3499 static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
3500 {
3501 	u32 gintsts;
3502 	u32 gintmsk;
3503 	u32 epctrl;
3504 	struct dwc2_hsotg_ep *hs_ep;
3505 	int idx;
3506 
3507 	dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
3508 
3509 	for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
3510 		hs_ep = hsotg->eps_out[idx];
3511 		epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
3512 		if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
3513 		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
3514 			/* Unmask GOUTNAKEFF interrupt */
3515 			gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
3516 			gintmsk |= GINTSTS_GOUTNAKEFF;
3517 			dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
3518 
3519 			gintsts = dwc2_readl(hsotg->regs + GINTSTS);
3520 			if (!(gintsts & GINTSTS_GOUTNAKEFF))
3521 				__orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
3522 		}
3523 	}
3524 
3525 	/* Clear interrupt */
3526 	dwc2_writel(GINTSTS_INCOMPL_SOOUT, hsotg->regs + GINTSTS);
3527 }
3528 
3529 /**
3530  * dwc2_hsotg_irq - handle device interrupt
3531  * @irq: The IRQ number triggered
3532  * @pw: The pw value when registered the handler.
3533  */
3534 static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
3535 {
3536 	struct dwc2_hsotg *hsotg = pw;
3537 	int retry_count = 8;
3538 	u32 gintsts;
3539 	u32 gintmsk;
3540 
3541 	if (!dwc2_is_device_mode(hsotg))
3542 		return IRQ_NONE;
3543 
3544 	spin_lock(&hsotg->lock);
3545 irq_retry:
3546 	gintsts = dwc2_readl(hsotg->regs + GINTSTS);
3547 	gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
3548 
3549 	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
3550 		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
3551 
3552 	gintsts &= gintmsk;
3553 
3554 	if (gintsts & GINTSTS_RESETDET) {
3555 		dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);
3556 
3557 		dwc2_writel(GINTSTS_RESETDET, hsotg->regs + GINTSTS);
3558 
3559 		/* This event must be used only if controller is suspended */
3560 		if (hsotg->lx_state == DWC2_L2) {
3561 			dwc2_exit_hibernation(hsotg, true);
3562 			hsotg->lx_state = DWC2_L0;
3563 		}
3564 	}
3565 
3566 	if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
3567 		u32 usb_status = dwc2_readl(hsotg->regs + GOTGCTL);
3568 		u32 connected = hsotg->connected;
3569 
3570 		dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
3571 		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
3572 			dwc2_readl(hsotg->regs + GNPTXSTS));
3573 
3574 		dwc2_writel(GINTSTS_USBRST, hsotg->regs + GINTSTS);
3575 
3576 		/* Report disconnection if it is not already done. */
3577 		dwc2_hsotg_disconnect(hsotg);
3578 
3579 		/* Reset device address to zero */
3580 		__bic32(hsotg->regs + DCFG, DCFG_DEVADDR_MASK);
3581 
3582 		if (usb_status & GOTGCTL_BSESVLD && connected)
3583 			dwc2_hsotg_core_init_disconnected(hsotg, true);
3584 	}
3585 
3586 	if (gintsts & GINTSTS_ENUMDONE) {
3587 		dwc2_writel(GINTSTS_ENUMDONE, hsotg->regs + GINTSTS);
3588 
3589 		dwc2_hsotg_irq_enumdone(hsotg);
3590 	}
3591 
3592 	if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
3593 		u32 daint = dwc2_readl(hsotg->regs + DAINT);
3594 		u32 daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
3595 		u32 daint_out, daint_in;
3596 		int ep;
3597 
3598 		daint &= daintmsk;
3599 		daint_out = daint >> DAINT_OUTEP_SHIFT;
3600 		daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);
3601 
3602 		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
3603 
3604 		for (ep = 0; ep < hsotg->num_of_eps && daint_out;
3605 						ep++, daint_out >>= 1) {
3606 			if (daint_out & 1)
3607 				dwc2_hsotg_epint(hsotg, ep, 0);
3608 		}
3609 
3610 		for (ep = 0; ep < hsotg->num_of_eps  && daint_in;
3611 						ep++, daint_in >>= 1) {
3612 			if (daint_in & 1)
3613 				dwc2_hsotg_epint(hsotg, ep, 1);
3614 		}
3615 	}
3616 
3617 	/* check both FIFOs */
3618 
3619 	if (gintsts & GINTSTS_NPTXFEMP) {
3620 		dev_dbg(hsotg->dev, "NPTxFEmp\n");
3621 
3622 		/*
3623 		 * Disable the interrupt to stop it happening again
3624 		 * unless one of these endpoint routines decides that
3625 		 * it needs re-enabling
3626 		 */
3627 
3628 		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
3629 		dwc2_hsotg_irq_fifoempty(hsotg, false);
3630 	}
3631 
3632 	if (gintsts & GINTSTS_PTXFEMP) {
3633 		dev_dbg(hsotg->dev, "PTxFEmp\n");
3634 
3635 		/* See note in GINTSTS_NPTxFEmp */
3636 
3637 		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
3638 		dwc2_hsotg_irq_fifoempty(hsotg, true);
3639 	}
3640 
3641 	if (gintsts & GINTSTS_RXFLVL) {
3642 		/*
3643 		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
3644 		 * we need to retry dwc2_hsotg_handle_rx if this is still
3645 		 * set.
3646 		 */
3647 
3648 		dwc2_hsotg_handle_rx(hsotg);
3649 	}
3650 
3651 	if (gintsts & GINTSTS_ERLYSUSP) {
3652 		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
3653 		dwc2_writel(GINTSTS_ERLYSUSP, hsotg->regs + GINTSTS);
3654 	}
3655 
3656 	/*
3657 	 * these next two seem to crop-up occasionally causing the core
3658 	 * to shutdown the USB transfer, so try clearing them and logging
3659 	 * the occurrence.
3660 	 */
3661 
3662 	if (gintsts & GINTSTS_GOUTNAKEFF) {
3663 		u8 idx;
3664 		u32 epctrl;
3665 		u32 gintmsk;
3666 		struct dwc2_hsotg_ep *hs_ep;
3667 
3668 		/* Mask this interrupt */
3669 		gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
3670 		gintmsk &= ~GINTSTS_GOUTNAKEFF;
3671 		dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
3672 
3673 		dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
3674 		for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
3675 			hs_ep = hsotg->eps_out[idx];
3676 			epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
3677 
3678 			if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
3679 				epctrl |= DXEPCTL_SNAK;
3680 				epctrl |= DXEPCTL_EPDIS;
3681 				dwc2_writel(epctrl, hsotg->regs + DOEPCTL(idx));
3682 			}
3683 		}
3684 
3685 		/* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
3686 	}
3687 
3688 	if (gintsts & GINTSTS_GINNAKEFF) {
3689 		dev_info(hsotg->dev, "GINNakEff triggered\n");
3690 
3691 		__orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK);
3692 
3693 		dwc2_hsotg_dump(hsotg);
3694 	}
3695 
3696 	if (gintsts & GINTSTS_INCOMPL_SOIN)
3697 		dwc2_gadget_handle_incomplete_isoc_in(hsotg);
3698 
3699 	if (gintsts & GINTSTS_INCOMPL_SOOUT)
3700 		dwc2_gadget_handle_incomplete_isoc_out(hsotg);
3701 
3702 	/*
3703 	 * if we've had fifo events, we should try and go around the
3704 	 * loop again to see if there's any point in returning yet.
3705 	 */
3706 
3707 	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
3708 		goto irq_retry;
3709 
3710 	spin_unlock(&hsotg->lock);
3711 
3712 	return IRQ_HANDLED;
3713 }
3714 
3715 static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
3716 				   u32 bit, u32 timeout)
3717 {
3718 	u32 i;
3719 
3720 	for (i = 0; i < timeout; i++) {
3721 		if (dwc2_readl(hs_otg->regs + reg) & bit)
3722 			return 0;
3723 		udelay(1);
3724 	}
3725 
3726 	return -ETIMEDOUT;
3727 }
3728 
/**
 * dwc2_hsotg_ep_stop_xfr - stop an active transfer on an endpoint
 * @hsotg: The device state.
 * @hs_ep: The endpoint whose transfer is to be stopped.
 *
 * NAKs the endpoint (per-endpoint NAK for dedicated-FIFO or periodic
 * IN endpoints, global IN/OUT NAK otherwise), waits for the NAK to
 * take effect, disables the endpoint and then flushes the TX FIFO for
 * the IN direction or clears the global OUT NAK for the OUT direction.
 * Called with the driver spinlock held (see the callers in this file).
 */
static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
				   struct dwc2_hsotg_ep *hs_ep)
{
	u32 epctrl_reg;
	u32 epint_reg;

	/* pick the control/interrupt registers for this ep's direction */
	epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
		DOEPCTL(hs_ep->index);
	epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
		DOEPINT(hs_ep->index);

	dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
		hs_ep->name);

	if (hs_ep->dir_in) {
		if (hsotg->dedicated_fifos || hs_ep->periodic) {
			/* per-endpoint NAK */
			__orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
			/* Wait for Nak effect */
			if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
						    DXEPINT_INEPNAKEFF, 100))
				dev_warn(hsotg->dev,
					 "%s: timeout DIEPINT.NAKEFF\n",
					 __func__);
		} else {
			/* shared FIFO: use the global non-periodic IN NAK */
			__orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK);
			/* Wait for Nak effect */
			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
						    GINTSTS_GINNAKEFF, 100))
				dev_warn(hsotg->dev,
					 "%s: timeout GINTSTS.GINNAKEFF\n",
					 __func__);
		}
	} else {
		/* set the global OUT NAK unless it is already in effect */
		if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
			__orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);

		/* Wait for global nak to take effect */
		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
					    GINTSTS_GOUTNAKEFF, 100))
			dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
				 __func__);
	}

	/* Disable ep */
	__orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);

	/* Wait for ep to be disabled */
	if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
		dev_warn(hsotg->dev,
			 "%s: timeout DOEPCTL.EPDisable\n", __func__);

	/* Clear EPDISBLD interrupt */
	__orr32(hsotg->regs + epint_reg, DXEPINT_EPDISBLD);

	if (hs_ep->dir_in) {
		unsigned short fifo_index;

		/* non-periodic endpoints on shared-FIFO cores use FIFO 0 */
		if (hsotg->dedicated_fifos || hs_ep->periodic)
			fifo_index = hs_ep->fifo_index;
		else
			fifo_index = 0;

		/* Flush TX FIFO */
		dwc2_flush_tx_fifo(hsotg, fifo_index);

		/* Clear Global In NP NAK in Shared FIFO for non periodic ep */
		if (!hsotg->dedicated_fifos && !hs_ep->periodic)
			__orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK);

	} else {
		/* Remove global NAKs */
		__orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK);
	}
}
3803 
3804 /**
3805  * dwc2_hsotg_ep_enable - enable the given endpoint
3806  * @ep: The USB endpint to configure
3807  * @desc: The USB endpoint descriptor to configure with.
3808  *
3809  * This is called from the USB gadget code's usb_ep_enable().
3810  */
3811 static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
3812 				const struct usb_endpoint_descriptor *desc)
3813 {
3814 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
3815 	struct dwc2_hsotg *hsotg = hs_ep->parent;
3816 	unsigned long flags;
3817 	unsigned int index = hs_ep->index;
3818 	u32 epctrl_reg;
3819 	u32 epctrl;
3820 	u32 mps;
3821 	u32 mc;
3822 	u32 mask;
3823 	unsigned int dir_in;
3824 	unsigned int i, val, size;
3825 	int ret = 0;
3826 
3827 	dev_dbg(hsotg->dev,
3828 		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
3829 		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
3830 		desc->wMaxPacketSize, desc->bInterval);
3831 
3832 	/* not to be called for EP0 */
3833 	if (index == 0) {
3834 		dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
3835 		return -EINVAL;
3836 	}
3837 
3838 	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
3839 	if (dir_in != hs_ep->dir_in) {
3840 		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
3841 		return -EINVAL;
3842 	}
3843 
3844 	mps = usb_endpoint_maxp(desc);
3845 	mc = usb_endpoint_maxp_mult(desc);
3846 
3847 	/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
3848 
3849 	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
3850 	epctrl = dwc2_readl(hsotg->regs + epctrl_reg);
3851 
3852 	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
3853 		__func__, epctrl, epctrl_reg);
3854 
3855 	/* Allocate DMA descriptor chain for non-ctrl endpoints */
3856 	if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
3857 		hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
3858 			MAX_DMA_DESC_NUM_GENERIC *
3859 			sizeof(struct dwc2_dma_desc),
3860 			&hs_ep->desc_list_dma, GFP_ATOMIC);
3861 		if (!hs_ep->desc_list) {
3862 			ret = -ENOMEM;
3863 			goto error2;
3864 		}
3865 	}
3866 
3867 	spin_lock_irqsave(&hsotg->lock, flags);
3868 
3869 	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
3870 	epctrl |= DXEPCTL_MPS(mps);
3871 
3872 	/*
3873 	 * mark the endpoint as active, otherwise the core may ignore
3874 	 * transactions entirely for this endpoint
3875 	 */
3876 	epctrl |= DXEPCTL_USBACTEP;
3877 
3878 	/* update the endpoint state */
3879 	dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
3880 
3881 	/* default, set to non-periodic */
3882 	hs_ep->isochronous = 0;
3883 	hs_ep->periodic = 0;
3884 	hs_ep->halted = 0;
3885 	hs_ep->interval = desc->bInterval;
3886 
3887 	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
3888 	case USB_ENDPOINT_XFER_ISOC:
3889 		epctrl |= DXEPCTL_EPTYPE_ISO;
3890 		epctrl |= DXEPCTL_SETEVENFR;
3891 		hs_ep->isochronous = 1;
3892 		hs_ep->interval = 1 << (desc->bInterval - 1);
3893 		hs_ep->target_frame = TARGET_FRAME_INITIAL;
3894 		hs_ep->isoc_chain_num = 0;
3895 		hs_ep->next_desc = 0;
3896 		if (dir_in) {
3897 			hs_ep->periodic = 1;
3898 			mask = dwc2_readl(hsotg->regs + DIEPMSK);
3899 			mask |= DIEPMSK_NAKMSK;
3900 			dwc2_writel(mask, hsotg->regs + DIEPMSK);
3901 		} else {
3902 			mask = dwc2_readl(hsotg->regs + DOEPMSK);
3903 			mask |= DOEPMSK_OUTTKNEPDISMSK;
3904 			dwc2_writel(mask, hsotg->regs + DOEPMSK);
3905 		}
3906 		break;
3907 
3908 	case USB_ENDPOINT_XFER_BULK:
3909 		epctrl |= DXEPCTL_EPTYPE_BULK;
3910 		break;
3911 
3912 	case USB_ENDPOINT_XFER_INT:
3913 		if (dir_in)
3914 			hs_ep->periodic = 1;
3915 
3916 		if (hsotg->gadget.speed == USB_SPEED_HIGH)
3917 			hs_ep->interval = 1 << (desc->bInterval - 1);
3918 
3919 		epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
3920 		break;
3921 
3922 	case USB_ENDPOINT_XFER_CONTROL:
3923 		epctrl |= DXEPCTL_EPTYPE_CONTROL;
3924 		break;
3925 	}
3926 
3927 	/*
3928 	 * if the hardware has dedicated fifos, we must give each IN EP
3929 	 * a unique tx-fifo even if it is non-periodic.
3930 	 */
3931 	if (dir_in && hsotg->dedicated_fifos) {
3932 		u32 fifo_index = 0;
3933 		u32 fifo_size = UINT_MAX;
3934 
3935 		size = hs_ep->ep.maxpacket * hs_ep->mc;
3936 		for (i = 1; i < hsotg->num_of_eps; ++i) {
3937 			if (hsotg->fifo_map & (1 << i))
3938 				continue;
3939 			val = dwc2_readl(hsotg->regs + DPTXFSIZN(i));
3940 			val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
3941 			if (val < size)
3942 				continue;
3943 			/* Search for smallest acceptable fifo */
3944 			if (val < fifo_size) {
3945 				fifo_size = val;
3946 				fifo_index = i;
3947 			}
3948 		}
3949 		if (!fifo_index) {
3950 			dev_err(hsotg->dev,
3951 				"%s: No suitable fifo found\n", __func__);
3952 			ret = -ENOMEM;
3953 			goto error1;
3954 		}
3955 		hsotg->fifo_map |= 1 << fifo_index;
3956 		epctrl |= DXEPCTL_TXFNUM(fifo_index);
3957 		hs_ep->fifo_index = fifo_index;
3958 		hs_ep->fifo_size = fifo_size;
3959 	}
3960 
3961 	/* for non control endpoints, set PID to D0 */
3962 	if (index && !hs_ep->isochronous)
3963 		epctrl |= DXEPCTL_SETD0PID;
3964 
3965 	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
3966 		__func__, epctrl);
3967 
3968 	dwc2_writel(epctrl, hsotg->regs + epctrl_reg);
3969 	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
3970 		__func__, dwc2_readl(hsotg->regs + epctrl_reg));
3971 
3972 	/* enable the endpoint interrupt */
3973 	dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
3974 
3975 error1:
3976 	spin_unlock_irqrestore(&hsotg->lock, flags);
3977 
3978 error2:
3979 	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
3980 		dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
3981 			sizeof(struct dwc2_dma_desc),
3982 			hs_ep->desc_list, hs_ep->desc_list_dma);
3983 		hs_ep->desc_list = NULL;
3984 	}
3985 
3986 	return ret;
3987 }
3988 
3989 /**
3990  * dwc2_hsotg_ep_disable - disable given endpoint
3991  * @ep: The endpoint to disable.
3992  */
3993 static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
3994 {
3995 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
3996 	struct dwc2_hsotg *hsotg = hs_ep->parent;
3997 	int dir_in = hs_ep->dir_in;
3998 	int index = hs_ep->index;
3999 	unsigned long flags;
4000 	u32 epctrl_reg;
4001 	u32 ctrl;
4002 
4003 	dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
4004 
4005 	if (ep == &hsotg->eps_out[0]->ep) {
4006 		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
4007 		return -EINVAL;
4008 	}
4009 
4010 	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4011 		dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
4012 		return -EINVAL;
4013 	}
4014 
4015 	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
4016 
4017 	spin_lock_irqsave(&hsotg->lock, flags);
4018 
4019 	ctrl = dwc2_readl(hsotg->regs + epctrl_reg);
4020 
4021 	if (ctrl & DXEPCTL_EPENA)
4022 		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
4023 
4024 	ctrl &= ~DXEPCTL_EPENA;
4025 	ctrl &= ~DXEPCTL_USBACTEP;
4026 	ctrl |= DXEPCTL_SNAK;
4027 
4028 	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
4029 	dwc2_writel(ctrl, hsotg->regs + epctrl_reg);
4030 
4031 	/* disable endpoint interrupts */
4032 	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
4033 
4034 	/* terminate all requests with shutdown */
4035 	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
4036 
4037 	hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
4038 	hs_ep->fifo_index = 0;
4039 	hs_ep->fifo_size = 0;
4040 
4041 	spin_unlock_irqrestore(&hsotg->lock, flags);
4042 	return 0;
4043 }
4044 
4045 /**
4046  * on_list - check request is on the given endpoint
4047  * @ep: The endpoint to check.
4048  * @test: The request to test if it is on the endpoint.
4049  */
4050 static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
4051 {
4052 	struct dwc2_hsotg_req *req, *treq;
4053 
4054 	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
4055 		if (req == test)
4056 			return true;
4057 	}
4058 
4059 	return false;
4060 }
4061 
4062 /**
4063  * dwc2_hsotg_ep_dequeue - dequeue given endpoint
4064  * @ep: The endpoint to dequeue.
4065  * @req: The request to be removed from a queue.
4066  */
4067 static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
4068 {
4069 	struct dwc2_hsotg_req *hs_req = our_req(req);
4070 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4071 	struct dwc2_hsotg *hs = hs_ep->parent;
4072 	unsigned long flags;
4073 
4074 	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
4075 
4076 	spin_lock_irqsave(&hs->lock, flags);
4077 
4078 	if (!on_list(hs_ep, hs_req)) {
4079 		spin_unlock_irqrestore(&hs->lock, flags);
4080 		return -EINVAL;
4081 	}
4082 
4083 	/* Dequeue already started request */
4084 	if (req == &hs_ep->req->req)
4085 		dwc2_hsotg_ep_stop_xfr(hs, hs_ep);
4086 
4087 	dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
4088 	spin_unlock_irqrestore(&hs->lock, flags);
4089 
4090 	return 0;
4091 }
4092 
4093 /**
4094  * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
4095  * @ep: The endpoint to set halt.
4096  * @value: Set or unset the halt.
4097  * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
4098  *       the endpoint is busy processing requests.
4099  *
4100  * We need to stall the endpoint immediately if request comes from set_feature
4101  * protocol command handler.
4102  */
4103 static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
4104 {
4105 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4106 	struct dwc2_hsotg *hs = hs_ep->parent;
4107 	int index = hs_ep->index;
4108 	u32 epreg;
4109 	u32 epctl;
4110 	u32 xfertype;
4111 
4112 	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
4113 
4114 	if (index == 0) {
4115 		if (value)
4116 			dwc2_hsotg_stall_ep0(hs);
4117 		else
4118 			dev_warn(hs->dev,
4119 				 "%s: can't clear halt on ep0\n", __func__);
4120 		return 0;
4121 	}
4122 
4123 	if (hs_ep->isochronous) {
4124 		dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
4125 		return -EINVAL;
4126 	}
4127 
4128 	if (!now && value && !list_empty(&hs_ep->queue)) {
4129 		dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
4130 			ep->name);
4131 		return -EAGAIN;
4132 	}
4133 
4134 	if (hs_ep->dir_in) {
4135 		epreg = DIEPCTL(index);
4136 		epctl = dwc2_readl(hs->regs + epreg);
4137 
4138 		if (value) {
4139 			epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
4140 			if (epctl & DXEPCTL_EPENA)
4141 				epctl |= DXEPCTL_EPDIS;
4142 		} else {
4143 			epctl &= ~DXEPCTL_STALL;
4144 			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4145 			if (xfertype == DXEPCTL_EPTYPE_BULK ||
4146 			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4147 				epctl |= DXEPCTL_SETD0PID;
4148 		}
4149 		dwc2_writel(epctl, hs->regs + epreg);
4150 	} else {
4151 		epreg = DOEPCTL(index);
4152 		epctl = dwc2_readl(hs->regs + epreg);
4153 
4154 		if (value) {
4155 			epctl |= DXEPCTL_STALL;
4156 		} else {
4157 			epctl &= ~DXEPCTL_STALL;
4158 			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4159 			if (xfertype == DXEPCTL_EPTYPE_BULK ||
4160 			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4161 				epctl |= DXEPCTL_SETD0PID;
4162 		}
4163 		dwc2_writel(epctl, hs->regs + epreg);
4164 	}
4165 
4166 	hs_ep->halted = value;
4167 
4168 	return 0;
4169 }
4170 
4171 /**
4172  * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
4173  * @ep: The endpoint to set halt.
4174  * @value: Set or unset the halt.
4175  */
4176 static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
4177 {
4178 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4179 	struct dwc2_hsotg *hs = hs_ep->parent;
4180 	unsigned long flags = 0;
4181 	int ret = 0;
4182 
4183 	spin_lock_irqsave(&hs->lock, flags);
4184 	ret = dwc2_hsotg_ep_sethalt(ep, value, false);
4185 	spin_unlock_irqrestore(&hs->lock, flags);
4186 
4187 	return ret;
4188 }
4189 
/* per-endpoint operations handed to the gadget core via usb_ep.ops */
static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
	.enable		= dwc2_hsotg_ep_enable,
	.disable	= dwc2_hsotg_ep_disable,
	.alloc_request	= dwc2_hsotg_ep_alloc_request,
	.free_request	= dwc2_hsotg_ep_free_request,
	.queue		= dwc2_hsotg_ep_queue_lock,
	.dequeue	= dwc2_hsotg_ep_dequeue,
	.set_halt	= dwc2_hsotg_ep_sethalt_lock,
	/* note, don't believe we have any call for the fifo routines */
};
4200 
4201 /**
4202  * dwc2_hsotg_init - initialize the usb core
4203  * @hsotg: The driver state
4204  */
4205 static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
4206 {
4207 	u32 trdtim;
4208 	u32 usbcfg;
4209 	/* unmask subset of endpoint interrupts */
4210 
4211 	dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
4212 		    DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
4213 		    hsotg->regs + DIEPMSK);
4214 
4215 	dwc2_writel(DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
4216 		    DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
4217 		    hsotg->regs + DOEPMSK);
4218 
4219 	dwc2_writel(0, hsotg->regs + DAINTMSK);
4220 
4221 	/* Be in disconnected state until gadget is registered */
4222 	__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
4223 
4224 	/* setup fifos */
4225 
4226 	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
4227 		dwc2_readl(hsotg->regs + GRXFSIZ),
4228 		dwc2_readl(hsotg->regs + GNPTXFSIZ));
4229 
4230 	dwc2_hsotg_init_fifo(hsotg);
4231 
4232 	/* keep other bits untouched (so e.g. forced modes are not lost) */
4233 	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
4234 	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
4235 		GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
4236 
4237 	/* set the PLL on, remove the HNP/SRP and set the PHY */
4238 	trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
4239 	usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
4240 		(trdtim << GUSBCFG_USBTRDTIM_SHIFT);
4241 	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
4242 
4243 	if (using_dma(hsotg))
4244 		__orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
4245 }
4246 
4247 /**
4248  * dwc2_hsotg_udc_start - prepare the udc for work
4249  * @gadget: The usb gadget state
4250  * @driver: The usb gadget driver
4251  *
4252  * Perform initialization to prepare udc device and driver
4253  * to work.
4254  */
4255 static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
4256 				struct usb_gadget_driver *driver)
4257 {
4258 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4259 	unsigned long flags;
4260 	int ret;
4261 
4262 	if (!hsotg) {
4263 		pr_err("%s: called with no device\n", __func__);
4264 		return -ENODEV;
4265 	}
4266 
4267 	if (!driver) {
4268 		dev_err(hsotg->dev, "%s: no driver\n", __func__);
4269 		return -EINVAL;
4270 	}
4271 
4272 	if (driver->max_speed < USB_SPEED_FULL)
4273 		dev_err(hsotg->dev, "%s: bad speed\n", __func__);
4274 
4275 	if (!driver->setup) {
4276 		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
4277 		return -EINVAL;
4278 	}
4279 
4280 	WARN_ON(hsotg->driver);
4281 
4282 	driver->driver.bus = NULL;
4283 	hsotg->driver = driver;
4284 	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
4285 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4286 
4287 	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
4288 		ret = dwc2_lowlevel_hw_enable(hsotg);
4289 		if (ret)
4290 			goto err;
4291 	}
4292 
4293 	if (!IS_ERR_OR_NULL(hsotg->uphy))
4294 		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
4295 
4296 	spin_lock_irqsave(&hsotg->lock, flags);
4297 	if (dwc2_hw_is_device(hsotg)) {
4298 		dwc2_hsotg_init(hsotg);
4299 		dwc2_hsotg_core_init_disconnected(hsotg, false);
4300 	}
4301 
4302 	hsotg->enabled = 0;
4303 	spin_unlock_irqrestore(&hsotg->lock, flags);
4304 
4305 	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
4306 
4307 	return 0;
4308 
4309 err:
4310 	hsotg->driver = NULL;
4311 	return ret;
4312 }
4313 
4314 /**
4315  * dwc2_hsotg_udc_stop - stop the udc
4316  * @gadget: The usb gadget state
4317  * @driver: The usb gadget driver
4318  *
4319  * Stop udc hw block and stay tunned for future transmissions
4320  */
4321 static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
4322 {
4323 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4324 	unsigned long flags = 0;
4325 	int ep;
4326 
4327 	if (!hsotg)
4328 		return -ENODEV;
4329 
4330 	/* all endpoints should be shutdown */
4331 	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
4332 		if (hsotg->eps_in[ep])
4333 			dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
4334 		if (hsotg->eps_out[ep])
4335 			dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
4336 	}
4337 
4338 	spin_lock_irqsave(&hsotg->lock, flags);
4339 
4340 	hsotg->driver = NULL;
4341 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4342 	hsotg->enabled = 0;
4343 
4344 	spin_unlock_irqrestore(&hsotg->lock, flags);
4345 
4346 	if (!IS_ERR_OR_NULL(hsotg->uphy))
4347 		otg_set_peripheral(hsotg->uphy->otg, NULL);
4348 
4349 	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
4350 		dwc2_lowlevel_hw_disable(hsotg);
4351 
4352 	return 0;
4353 }
4354 
4355 /**
4356  * dwc2_hsotg_gadget_getframe - read the frame number
4357  * @gadget: The usb gadget state
4358  *
4359  * Read the {micro} frame number
4360  */
4361 static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
4362 {
4363 	return dwc2_hsotg_read_frameno(to_hsotg(gadget));
4364 }
4365 
4366 /**
4367  * dwc2_hsotg_pullup - connect/disconnect the USB PHY
4368  * @gadget: The usb gadget state
4369  * @is_on: Current state of the USB PHY
4370  *
4371  * Connect/Disconnect the USB PHY pullup
4372  */
4373 static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
4374 {
4375 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4376 	unsigned long flags = 0;
4377 
4378 	dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
4379 		hsotg->op_state);
4380 
4381 	/* Don't modify pullup state while in host mode */
4382 	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4383 		hsotg->enabled = is_on;
4384 		return 0;
4385 	}
4386 
4387 	spin_lock_irqsave(&hsotg->lock, flags);
4388 	if (is_on) {
4389 		hsotg->enabled = 1;
4390 		dwc2_hsotg_core_init_disconnected(hsotg, false);
4391 		dwc2_hsotg_core_connect(hsotg);
4392 	} else {
4393 		dwc2_hsotg_core_disconnect(hsotg);
4394 		dwc2_hsotg_disconnect(hsotg);
4395 		hsotg->enabled = 0;
4396 	}
4397 
4398 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4399 	spin_unlock_irqrestore(&hsotg->lock, flags);
4400 
4401 	return 0;
4402 }
4403 
/*
 * dwc2_hsotg_vbus_session - handle a VBUS session change
 * @gadget: The usb gadget state
 * @is_active: non-zero when VBUS has appeared, zero when it has gone
 *
 * Initialize (and connect, if previously enabled) or disconnect the
 * core when the bus session state changes.
 */
static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;

	dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
	spin_lock_irqsave(&hsotg->lock, flags);

	/*
	 * If controller is hibernated, it must exit from hibernation
	 * before being initialized / de-initialized
	 */
	if (hsotg->lx_state == DWC2_L2)
		dwc2_exit_hibernation(hsotg, false);

	if (is_active) {
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;

		dwc2_hsotg_core_init_disconnected(hsotg, false);
		/* only pull up if the gadget driver asked for it earlier */
		if (hsotg->enabled)
			dwc2_hsotg_core_connect(hsotg);
	} else {
		dwc2_hsotg_core_disconnect(hsotg);
		dwc2_hsotg_disconnect(hsotg);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return 0;
}
4433 
4434 /**
4435  * dwc2_hsotg_vbus_draw - report bMaxPower field
4436  * @gadget: The usb gadget state
4437  * @mA: Amount of current
4438  *
4439  * Report how much power the device may consume to the phy.
4440  */
4441 static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
4442 {
4443 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4444 
4445 	if (IS_ERR_OR_NULL(hsotg->uphy))
4446 		return -ENOTSUPP;
4447 	return usb_phy_set_power(hsotg->uphy, mA);
4448 }
4449 
4450 static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
4451 	.get_frame	= dwc2_hsotg_gadget_getframe,
4452 	.udc_start		= dwc2_hsotg_udc_start,
4453 	.udc_stop		= dwc2_hsotg_udc_stop,
4454 	.pullup                 = dwc2_hsotg_pullup,
4455 	.vbus_session		= dwc2_hsotg_vbus_session,
4456 	.vbus_draw		= dwc2_hsotg_vbus_draw,
4457 };
4458 
4459 /**
4460  * dwc2_hsotg_initep - initialise a single endpoint
4461  * @hsotg: The device state.
4462  * @hs_ep: The endpoint to be initialised.
4463  * @epnum: The endpoint number
4464  *
4465  * Initialise the given endpoint (as part of the probe and device state
4466  * creation) to give to the gadget driver. Setup the endpoint name, any
4467  * direction information and other state that may be required.
4468  */
4469 static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
4470 			      struct dwc2_hsotg_ep *hs_ep,
4471 				       int epnum,
4472 				       bool dir_in)
4473 {
4474 	char *dir;
4475 
4476 	if (epnum == 0)
4477 		dir = "";
4478 	else if (dir_in)
4479 		dir = "in";
4480 	else
4481 		dir = "out";
4482 
4483 	hs_ep->dir_in = dir_in;
4484 	hs_ep->index = epnum;
4485 
4486 	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
4487 
4488 	INIT_LIST_HEAD(&hs_ep->queue);
4489 	INIT_LIST_HEAD(&hs_ep->ep.ep_list);
4490 
4491 	/* add to the list of endpoints known by the gadget driver */
4492 	if (epnum)
4493 		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
4494 
4495 	hs_ep->parent = hsotg;
4496 	hs_ep->ep.name = hs_ep->name;
4497 
4498 	if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
4499 		usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
4500 	else
4501 		usb_ep_set_maxpacket_limit(&hs_ep->ep,
4502 					   epnum ? 1024 : EP0_MPS_LIMIT);
4503 	hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
4504 
4505 	if (epnum == 0) {
4506 		hs_ep->ep.caps.type_control = true;
4507 	} else {
4508 		if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
4509 			hs_ep->ep.caps.type_iso = true;
4510 			hs_ep->ep.caps.type_bulk = true;
4511 		}
4512 		hs_ep->ep.caps.type_int = true;
4513 	}
4514 
4515 	if (dir_in)
4516 		hs_ep->ep.caps.dir_in = true;
4517 	else
4518 		hs_ep->ep.caps.dir_out = true;
4519 
4520 	/*
4521 	 * if we're using dma, we need to set the next-endpoint pointer
4522 	 * to be something valid.
4523 	 */
4524 
4525 	if (using_dma(hsotg)) {
4526 		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
4527 
4528 		if (dir_in)
4529 			dwc2_writel(next, hsotg->regs + DIEPCTL(epnum));
4530 		else
4531 			dwc2_writel(next, hsotg->regs + DOEPCTL(epnum));
4532 	}
4533 }
4534 
4535 /**
4536  * dwc2_hsotg_hw_cfg - read HW configuration registers
4537  * @param: The device state
4538  *
4539  * Read the USB core HW configuration registers
4540  */
4541 static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
4542 {
4543 	u32 cfg;
4544 	u32 ep_type;
4545 	u32 i;
4546 
4547 	/* check hardware configuration */
4548 
4549 	hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;
4550 
4551 	/* Add ep0 */
4552 	hsotg->num_of_eps++;
4553 
4554 	hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
4555 					sizeof(struct dwc2_hsotg_ep),
4556 					GFP_KERNEL);
4557 	if (!hsotg->eps_in[0])
4558 		return -ENOMEM;
4559 	/* Same dwc2_hsotg_ep is used in both directions for ep0 */
4560 	hsotg->eps_out[0] = hsotg->eps_in[0];
4561 
4562 	cfg = hsotg->hw_params.dev_ep_dirs;
4563 	for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
4564 		ep_type = cfg & 3;
4565 		/* Direction in or both */
4566 		if (!(ep_type & 2)) {
4567 			hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
4568 				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
4569 			if (!hsotg->eps_in[i])
4570 				return -ENOMEM;
4571 		}
4572 		/* Direction out or both */
4573 		if (!(ep_type & 1)) {
4574 			hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
4575 				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
4576 			if (!hsotg->eps_out[i])
4577 				return -ENOMEM;
4578 		}
4579 	}
4580 
4581 	hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
4582 	hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;
4583 
4584 	dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
4585 		 hsotg->num_of_eps,
4586 		 hsotg->dedicated_fifos ? "dedicated" : "shared",
4587 		 hsotg->fifo_mem);
4588 	return 0;
4589 }
4590 
4591 /**
4592  * dwc2_hsotg_dump - dump state of the udc
4593  * @param: The device state
4594  */
4595 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
4596 {
4597 #ifdef DEBUG
4598 	struct device *dev = hsotg->dev;
4599 	void __iomem *regs = hsotg->regs;
4600 	u32 val;
4601 	int idx;
4602 
4603 	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
4604 		 dwc2_readl(regs + DCFG), dwc2_readl(regs + DCTL),
4605 		 dwc2_readl(regs + DIEPMSK));
4606 
4607 	dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
4608 		 dwc2_readl(regs + GAHBCFG), dwc2_readl(regs + GHWCFG1));
4609 
4610 	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
4611 		 dwc2_readl(regs + GRXFSIZ), dwc2_readl(regs + GNPTXFSIZ));
4612 
4613 	/* show periodic fifo settings */
4614 
4615 	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
4616 		val = dwc2_readl(regs + DPTXFSIZN(idx));
4617 		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
4618 			 val >> FIFOSIZE_DEPTH_SHIFT,
4619 			 val & FIFOSIZE_STARTADDR_MASK);
4620 	}
4621 
4622 	for (idx = 0; idx < hsotg->num_of_eps; idx++) {
4623 		dev_info(dev,
4624 			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
4625 			 dwc2_readl(regs + DIEPCTL(idx)),
4626 			 dwc2_readl(regs + DIEPTSIZ(idx)),
4627 			 dwc2_readl(regs + DIEPDMA(idx)));
4628 
4629 		val = dwc2_readl(regs + DOEPCTL(idx));
4630 		dev_info(dev,
4631 			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
4632 			 idx, dwc2_readl(regs + DOEPCTL(idx)),
4633 			 dwc2_readl(regs + DOEPTSIZ(idx)),
4634 			 dwc2_readl(regs + DOEPDMA(idx)));
4635 	}
4636 
4637 	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
4638 		 dwc2_readl(regs + DVBUSDIS), dwc2_readl(regs + DVBUSPULSE));
4639 #endif
4640 }
4641 
4642 /**
4643  * dwc2_gadget_init - init function for gadget
4644  * @dwc2: The data structure for the DWC2 driver.
4645  * @irq: The IRQ number for the controller.
4646  */
4647 int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
4648 {
4649 	struct device *dev = hsotg->dev;
4650 	int epnum;
4651 	int ret;
4652 
4653 	/* Dump fifo information */
4654 	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
4655 		hsotg->params.g_np_tx_fifo_size);
4656 	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
4657 
4658 	hsotg->gadget.max_speed = USB_SPEED_HIGH;
4659 	hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
4660 	hsotg->gadget.name = dev_name(dev);
4661 	if (hsotg->dr_mode == USB_DR_MODE_OTG)
4662 		hsotg->gadget.is_otg = 1;
4663 	else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
4664 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
4665 
4666 	ret = dwc2_hsotg_hw_cfg(hsotg);
4667 	if (ret) {
4668 		dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
4669 		return ret;
4670 	}
4671 
4672 	hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
4673 			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
4674 	if (!hsotg->ctrl_buff)
4675 		return -ENOMEM;
4676 
4677 	hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
4678 			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
4679 	if (!hsotg->ep0_buff)
4680 		return -ENOMEM;
4681 
4682 	if (using_desc_dma(hsotg)) {
4683 		ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
4684 		if (ret < 0)
4685 			return ret;
4686 	}
4687 
4688 	ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED,
4689 			       dev_name(hsotg->dev), hsotg);
4690 	if (ret < 0) {
4691 		dev_err(dev, "cannot claim IRQ for gadget\n");
4692 		return ret;
4693 	}
4694 
4695 	/* hsotg->num_of_eps holds number of EPs other than ep0 */
4696 
4697 	if (hsotg->num_of_eps == 0) {
4698 		dev_err(dev, "wrong number of EPs (zero)\n");
4699 		return -EINVAL;
4700 	}
4701 
4702 	/* setup endpoint information */
4703 
4704 	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
4705 	hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
4706 
4707 	/* allocate EP0 request */
4708 
4709 	hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
4710 						     GFP_KERNEL);
4711 	if (!hsotg->ctrl_req) {
4712 		dev_err(dev, "failed to allocate ctrl req\n");
4713 		return -ENOMEM;
4714 	}
4715 
4716 	/* initialise the endpoints now the core has been initialised */
4717 	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
4718 		if (hsotg->eps_in[epnum])
4719 			dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
4720 					  epnum, 1);
4721 		if (hsotg->eps_out[epnum])
4722 			dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
4723 					  epnum, 0);
4724 	}
4725 
4726 	ret = usb_add_gadget_udc(dev, &hsotg->gadget);
4727 	if (ret)
4728 		return ret;
4729 
4730 	dwc2_hsotg_dump(hsotg);
4731 
4732 	return 0;
4733 }
4734 
4735 /**
4736  * dwc2_hsotg_remove - remove function for hsotg driver
4737  * @pdev: The platform information for the driver
4738  */
4739 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
4740 {
4741 	usb_del_gadget_udc(&hsotg->gadget);
4742 
4743 	return 0;
4744 }
4745 
/*
 * dwc2_hsotg_suspend - suspend the gadget side of the controller
 * @hsotg: The device state
 *
 * Disconnect the core and disable all non-ep0 endpoints if a gadget
 * driver is bound. Does nothing unless the core is in the L0 state.
 */
int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	if (hsotg->lx_state != DWC2_L0)
		return 0;

	if (hsotg->driver) {
		int ep;

		dev_info(hsotg->dev, "suspending usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		if (hsotg->enabled)
			dwc2_hsotg_core_disconnect(hsotg);
		dwc2_hsotg_disconnect(hsotg);
		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
		spin_unlock_irqrestore(&hsotg->lock, flags);

		/* endpoint disable takes the lock itself, so do it unlocked */
		for (ep = 0; ep < hsotg->num_of_eps; ep++) {
			if (hsotg->eps_in[ep])
				dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
			if (hsotg->eps_out[ep])
				dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
		}
	}

	return 0;
}
4776 
/*
 * dwc2_hsotg_resume - resume the gadget side of the controller
 * @hsotg: The device state
 *
 * Re-initialize the core and reconnect (if previously enabled) when a
 * gadget driver is bound. Skipped while the core is in the L2 state.
 */
int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	if (hsotg->lx_state == DWC2_L2)
		return 0;

	if (hsotg->driver) {
		dev_info(hsotg->dev, "resuming usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_hsotg_core_init_disconnected(hsotg, false);
		if (hsotg->enabled)
			dwc2_hsotg_core_connect(hsotg);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return 0;
}
4797 
4798 /**
4799  * dwc2_backup_device_registers() - Backup controller device registers.
4800  * When suspending usb bus, registers needs to be backuped
4801  * if controller power is disabled once suspended.
4802  *
4803  * @hsotg: Programming view of the DWC_otg controller
4804  */
4805 int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
4806 {
4807 	struct dwc2_dregs_backup *dr;
4808 	int i;
4809 
4810 	dev_dbg(hsotg->dev, "%s\n", __func__);
4811 
4812 	/* Backup dev regs */
4813 	dr = &hsotg->dr_backup;
4814 
4815 	dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
4816 	dr->dctl = dwc2_readl(hsotg->regs + DCTL);
4817 	dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
4818 	dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
4819 	dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
4820 
4821 	for (i = 0; i < hsotg->num_of_eps; i++) {
4822 		/* Backup IN EPs */
4823 		dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));
4824 
4825 		/* Ensure DATA PID is correctly configured */
4826 		if (dr->diepctl[i] & DXEPCTL_DPID)
4827 			dr->diepctl[i] |= DXEPCTL_SETD1PID;
4828 		else
4829 			dr->diepctl[i] |= DXEPCTL_SETD0PID;
4830 
4831 		dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
4832 		dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));
4833 
4834 		/* Backup OUT EPs */
4835 		dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));
4836 
4837 		/* Ensure DATA PID is correctly configured */
4838 		if (dr->doepctl[i] & DXEPCTL_DPID)
4839 			dr->doepctl[i] |= DXEPCTL_SETD1PID;
4840 		else
4841 			dr->doepctl[i] |= DXEPCTL_SETD0PID;
4842 
4843 		dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
4844 		dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
4845 	}
4846 	dr->valid = true;
4847 	return 0;
4848 }
4849 
4850 /**
4851  * dwc2_restore_device_registers() - Restore controller device registers.
4852  * When resuming usb bus, device registers needs to be restored
4853  * if controller power were disabled.
4854  *
4855  * @hsotg: Programming view of the DWC_otg controller
4856  */
4857 int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
4858 {
4859 	struct dwc2_dregs_backup *dr;
4860 	u32 dctl;
4861 	int i;
4862 
4863 	dev_dbg(hsotg->dev, "%s\n", __func__);
4864 
4865 	/* Restore dev regs */
4866 	dr = &hsotg->dr_backup;
4867 	if (!dr->valid) {
4868 		dev_err(hsotg->dev, "%s: no device registers to restore\n",
4869 			__func__);
4870 		return -EINVAL;
4871 	}
4872 	dr->valid = false;
4873 
4874 	dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
4875 	dwc2_writel(dr->dctl, hsotg->regs + DCTL);
4876 	dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
4877 	dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
4878 	dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);
4879 
4880 	for (i = 0; i < hsotg->num_of_eps; i++) {
4881 		/* Restore IN EPs */
4882 		dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
4883 		dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
4884 		dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));
4885 
4886 		/* Restore OUT EPs */
4887 		dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
4888 		dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
4889 		dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
4890 	}
4891 
4892 	/* Set the Power-On Programming done bit */
4893 	dctl = dwc2_readl(hsotg->regs + DCTL);
4894 	dctl |= DCTL_PWRONPRGDONE;
4895 	dwc2_writel(dctl, hsotg->regs + DCTL);
4896 
4897 	return 0;
4898 }
4899