xref: /linux/drivers/usb/dwc2/gadget.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4  *		http://www.samsung.com
5  *
6  * Copyright 2008 Openmoko, Inc.
7  * Copyright 2008 Simtec Electronics
8  *      Ben Dooks <ben@simtec.co.uk>
9  *      http://armlinux.simtec.co.uk/
10  *
11  * S3C USB2.0 High-speed / OtG driver
12  */
13 
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/platform_device.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/mutex.h>
21 #include <linux/seq_file.h>
22 #include <linux/delay.h>
23 #include <linux/io.h>
24 #include <linux/slab.h>
25 
26 #include <linux/usb/ch9.h>
27 #include <linux/usb/gadget.h>
28 #include <linux/usb/phy.h>
29 #include <linux/usb/composite.h>
30 
31 
32 #include "core.h"
33 #include "hw.h"
34 
35 /* conversion functions */
our_req(struct usb_request * req)36 static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
37 {
38 	return container_of(req, struct dwc2_hsotg_req, req);
39 }
40 
our_ep(struct usb_ep * ep)41 static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
42 {
43 	return container_of(ep, struct dwc2_hsotg_ep, ep);
44 }
45 
to_hsotg(struct usb_gadget * gadget)46 static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
47 {
48 	return container_of(gadget, struct dwc2_hsotg, gadget);
49 }
50 
dwc2_set_bit(struct dwc2_hsotg * hsotg,u32 offset,u32 val)51 static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
52 {
53 	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
54 }
55 
dwc2_clear_bit(struct dwc2_hsotg * hsotg,u32 offset,u32 val)56 static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
57 {
58 	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
59 }
60 
index_to_ep(struct dwc2_hsotg * hsotg,u32 ep_index,u32 dir_in)61 static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
62 						u32 ep_index, u32 dir_in)
63 {
64 	if (dir_in)
65 		return hsotg->eps_in[ep_index];
66 	else
67 		return hsotg->eps_out[ep_index];
68 }
69 
70 /* forward declaration of functions */
71 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
72 
73 /**
74  * using_dma - return the DMA status of the driver.
75  * @hsotg: The driver state.
76  *
77  * Return true if we're using DMA.
78  *
79  * Currently, we have the DMA support code worked into everywhere
80  * that needs it, but the AMBA DMA implementation in the hardware can
81  * only DMA from 32bit aligned addresses. This means that gadgets such
82  * as the CDC Ethernet cannot work as they often pass packets which are
83  * not 32bit aligned.
84  *
85  * Unfortunately the choice to use DMA or not is global to the controller
86  * and seems to be only settable when the controller is being put through
87  * a core reset. This means we either need to fix the gadgets to take
88  * account of DMA alignment, or add bounce buffers (yuerk).
89  *
90  * g_using_dma is set depending on dts flag.
91  */
using_dma(struct dwc2_hsotg * hsotg)92 static inline bool using_dma(struct dwc2_hsotg *hsotg)
93 {
94 	return hsotg->params.g_dma;
95 }
96 
97 /*
98  * using_desc_dma - return the descriptor DMA status of the driver.
99  * @hsotg: The driver state.
100  *
101  * Return true if we're using descriptor DMA.
102  */
using_desc_dma(struct dwc2_hsotg * hsotg)103 static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
104 {
105 	return hsotg->params.g_dma_desc;
106 }
107 
108 /**
109  * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
110  * @hs_ep: The endpoint
111  *
112  * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
113  * If an overrun occurs it will wrap the value and set the frame_overrun flag.
114  */
dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep * hs_ep)115 static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
116 {
117 	struct dwc2_hsotg *hsotg = hs_ep->parent;
118 	u16 limit = DSTS_SOFFN_LIMIT;
119 
120 	if (hsotg->gadget.speed != USB_SPEED_HIGH)
121 		limit >>= 3;
122 
123 	hs_ep->target_frame += hs_ep->interval;
124 	if (hs_ep->target_frame > limit) {
125 		hs_ep->frame_overrun = true;
126 		hs_ep->target_frame &= limit;
127 	} else {
128 		hs_ep->frame_overrun = false;
129 	}
130 }
131 
132 /**
133  * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
134  *                                    by one.
135  * @hs_ep: The endpoint.
136  *
137  * This function used in service interval based scheduling flow to calculate
138  * descriptor frame number filed value. For service interval mode frame
139  * number in descriptor should point to last (u)frame in the interval.
140  *
141  */
dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep * hs_ep)142 static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
143 {
144 	struct dwc2_hsotg *hsotg = hs_ep->parent;
145 	u16 limit = DSTS_SOFFN_LIMIT;
146 
147 	if (hsotg->gadget.speed != USB_SPEED_HIGH)
148 		limit >>= 3;
149 
150 	if (hs_ep->target_frame)
151 		hs_ep->target_frame -= 1;
152 	else
153 		hs_ep->target_frame = limit;
154 }
155 
156 /**
157  * dwc2_hsotg_en_gsint - enable one or more of the general interrupt
158  * @hsotg: The device state
159  * @ints: A bitmask of the interrupts to enable
160  */
dwc2_hsotg_en_gsint(struct dwc2_hsotg * hsotg,u32 ints)161 static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
162 {
163 	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
164 	u32 new_gsintmsk;
165 
166 	new_gsintmsk = gsintmsk | ints;
167 
168 	if (new_gsintmsk != gsintmsk) {
169 		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
170 		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
171 	}
172 }
173 
174 /**
175  * dwc2_hsotg_disable_gsint - disable one or more of the general interrupt
176  * @hsotg: The device state
177  * @ints: A bitmask of the interrupts to enable
178  */
dwc2_hsotg_disable_gsint(struct dwc2_hsotg * hsotg,u32 ints)179 static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
180 {
181 	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
182 	u32 new_gsintmsk;
183 
184 	new_gsintmsk = gsintmsk & ~ints;
185 
186 	if (new_gsintmsk != gsintmsk)
187 		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
188 }
189 
190 /**
191  * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
192  * @hsotg: The device state
193  * @ep: The endpoint index
194  * @dir_in: True if direction is in.
195  * @en: The enable value, true to enable
196  *
197  * Set or clear the mask for an individual endpoint's interrupt
198  * request.
199  */
dwc2_hsotg_ctrl_epint(struct dwc2_hsotg * hsotg,unsigned int ep,unsigned int dir_in,unsigned int en)200 static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
201 				  unsigned int ep, unsigned int dir_in,
202 				 unsigned int en)
203 {
204 	unsigned long flags;
205 	u32 bit = 1 << ep;
206 	u32 daint;
207 
208 	if (!dir_in)
209 		bit <<= 16;
210 
211 	local_irq_save(flags);
212 	daint = dwc2_readl(hsotg, DAINTMSK);
213 	if (en)
214 		daint |= bit;
215 	else
216 		daint &= ~bit;
217 	dwc2_writel(hsotg, daint, DAINTMSK);
218 	local_irq_restore(flags);
219 }
220 
221 /**
222  * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
223  *
224  * @hsotg: Programming view of the DWC_otg controller
225  */
dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg * hsotg)226 int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
227 {
228 	if (hsotg->hw_params.en_multiple_tx_fifo)
229 		/* In dedicated FIFO mode we need count of IN EPs */
230 		return hsotg->hw_params.num_dev_in_eps;
231 	else
232 		/* In shared FIFO mode we need count of Periodic IN EPs */
233 		return hsotg->hw_params.num_dev_perio_in_ep;
234 }
235 
236 /**
237  * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
238  * device mode TX FIFOs
239  *
240  * @hsotg: Programming view of the DWC_otg controller
241  */
dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg * hsotg)242 int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
243 {
244 	int addr;
245 	int tx_addr_max;
246 	u32 np_tx_fifo_size;
247 
248 	np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
249 				hsotg->params.g_np_tx_fifo_size);
250 
251 	/* Get Endpoint Info Control block size in DWORDs. */
252 	tx_addr_max = hsotg->hw_params.total_fifo_size;
253 
254 	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
255 	if (tx_addr_max <= addr)
256 		return 0;
257 
258 	return tx_addr_max - addr;
259 }
260 
261 /**
262  * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
263  *
264  * @hsotg: Programming view of the DWC_otg controller
265  *
266  */
dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg * hsotg)267 static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
268 {
269 	u32 gintsts2;
270 	u32 gintmsk2;
271 
272 	gintsts2 = dwc2_readl(hsotg, GINTSTS2);
273 	gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
274 	gintsts2 &= gintmsk2;
275 
276 	if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
277 		dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
278 		dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
279 		dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
280 	}
281 }
282 
283 /**
284  * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
285  * TX FIFOs
286  *
287  * @hsotg: Programming view of the DWC_otg controller
288  */
dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg * hsotg)289 int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
290 {
291 	int tx_fifo_count;
292 	int tx_fifo_depth;
293 
294 	tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);
295 
296 	tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
297 
298 	if (!tx_fifo_count)
299 		return tx_fifo_depth;
300 	else
301 		return tx_fifo_depth / tx_fifo_count;
302 }
303 
304 /**
305  * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
306  * @hsotg: The device instance.
307  */
dwc2_hsotg_init_fifo(struct dwc2_hsotg * hsotg)308 static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
309 {
310 	unsigned int ep;
311 	unsigned int addr;
312 	int timeout;
313 
314 	u32 val;
315 	u32 *txfsz = hsotg->params.g_tx_fifo_size;
316 
317 	/* Reset fifo map if not correctly cleared during previous session */
318 	WARN_ON(hsotg->fifo_map);
319 	hsotg->fifo_map = 0;
320 
321 	/* set RX/NPTX FIFO sizes */
322 	dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
323 	dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
324 		    FIFOSIZE_STARTADDR_SHIFT) |
325 		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
326 		    GNPTXFSIZ);
327 
328 	/*
329 	 * arange all the rest of the TX FIFOs, as some versions of this
330 	 * block have overlapping default addresses. This also ensures
331 	 * that if the settings have been changed, then they are set to
332 	 * known values.
333 	 */
334 
335 	/* start at the end of the GNPTXFSIZ, rounded up */
336 	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
337 
338 	/*
339 	 * Configure fifos sizes from provided configuration and assign
340 	 * them to endpoints dynamically according to maxpacket size value of
341 	 * given endpoint.
342 	 */
343 	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
344 		if (!txfsz[ep])
345 			continue;
346 		val = addr;
347 		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
348 		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
349 			  "insufficient fifo memory");
350 		addr += txfsz[ep];
351 
352 		dwc2_writel(hsotg, val, DPTXFSIZN(ep));
353 		val = dwc2_readl(hsotg, DPTXFSIZN(ep));
354 	}
355 
356 	dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
357 		    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
358 		    GDFIFOCFG);
359 	/*
360 	 * according to p428 of the design guide, we need to ensure that
361 	 * all fifos are flushed before continuing
362 	 */
363 
364 	dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
365 	       GRSTCTL_RXFFLSH, GRSTCTL);
366 
367 	/* wait until the fifos are both flushed */
368 	timeout = 100;
369 	while (1) {
370 		val = dwc2_readl(hsotg, GRSTCTL);
371 
372 		if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
373 			break;
374 
375 		if (--timeout == 0) {
376 			dev_err(hsotg->dev,
377 				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
378 				__func__, val);
379 			break;
380 		}
381 
382 		udelay(1);
383 	}
384 
385 	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
386 }
387 
388 /**
389  * dwc2_hsotg_ep_alloc_request - allocate USB rerequest structure
390  * @ep: USB endpoint to allocate request for.
391  * @flags: Allocation flags
392  *
393  * Allocate a new USB request structure appropriate for the specified endpoint
394  */
dwc2_hsotg_ep_alloc_request(struct usb_ep * ep,gfp_t flags)395 static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
396 						       gfp_t flags)
397 {
398 	struct dwc2_hsotg_req *req;
399 
400 	req = kzalloc(sizeof(*req), flags);
401 	if (!req)
402 		return NULL;
403 
404 	INIT_LIST_HEAD(&req->queue);
405 
406 	return &req->req;
407 }
408 
409 /**
410  * is_ep_periodic - return true if the endpoint is in periodic mode.
411  * @hs_ep: The endpoint to query.
412  *
413  * Returns true if the endpoint is in periodic mode, meaning it is being
414  * used for an Interrupt or ISO transfer.
415  */
is_ep_periodic(struct dwc2_hsotg_ep * hs_ep)416 static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
417 {
418 	return hs_ep->periodic;
419 }
420 
421 /**
422  * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
423  * @hsotg: The device state.
424  * @hs_ep: The endpoint for the request
425  * @hs_req: The request being processed.
426  *
427  * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
428  * of a request to ensure the buffer is ready for access by the caller.
429  */
dwc2_hsotg_unmap_dma(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep,struct dwc2_hsotg_req * hs_req)430 static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
431 				 struct dwc2_hsotg_ep *hs_ep,
432 				struct dwc2_hsotg_req *hs_req)
433 {
434 	struct usb_request *req = &hs_req->req;
435 
436 	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
437 }
438 
439 /*
440  * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
441  * for Control endpoint
442  * @hsotg: The device state.
443  *
444  * This function will allocate 4 descriptor chains for EP 0: 2 for
445  * Setup stage, per one for IN and OUT data/status transactions.
446  */
dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg * hsotg)447 static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
448 {
449 	hsotg->setup_desc[0] =
450 		dmam_alloc_coherent(hsotg->dev,
451 				    sizeof(struct dwc2_dma_desc),
452 				    &hsotg->setup_desc_dma[0],
453 				    GFP_KERNEL);
454 	if (!hsotg->setup_desc[0])
455 		goto fail;
456 
457 	hsotg->setup_desc[1] =
458 		dmam_alloc_coherent(hsotg->dev,
459 				    sizeof(struct dwc2_dma_desc),
460 				    &hsotg->setup_desc_dma[1],
461 				    GFP_KERNEL);
462 	if (!hsotg->setup_desc[1])
463 		goto fail;
464 
465 	hsotg->ctrl_in_desc =
466 		dmam_alloc_coherent(hsotg->dev,
467 				    sizeof(struct dwc2_dma_desc),
468 				    &hsotg->ctrl_in_desc_dma,
469 				    GFP_KERNEL);
470 	if (!hsotg->ctrl_in_desc)
471 		goto fail;
472 
473 	hsotg->ctrl_out_desc =
474 		dmam_alloc_coherent(hsotg->dev,
475 				    sizeof(struct dwc2_dma_desc),
476 				    &hsotg->ctrl_out_desc_dma,
477 				    GFP_KERNEL);
478 	if (!hsotg->ctrl_out_desc)
479 		goto fail;
480 
481 	return 0;
482 
483 fail:
484 	return -ENOMEM;
485 }
486 
487 /**
488  * dwc2_hsotg_write_fifo - write packet Data to the TxFIFO
489  * @hsotg: The controller state.
490  * @hs_ep: The endpoint we're going to write for.
491  * @hs_req: The request to write data for.
492  *
493  * This is called when the TxFIFO has some space in it to hold a new
494  * transmission and we have something to give it. The actual setup of
495  * the data size is done elsewhere, so all we have to do is to actually
496  * write the data.
497  *
498  * The return value is zero if there is more space (or nothing was done)
499  * otherwise -ENOSPC is returned if the FIFO space was used up.
500  *
501  * This routine is only needed for PIO
502  */
dwc2_hsotg_write_fifo(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep,struct dwc2_hsotg_req * hs_req)503 static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
504 				 struct dwc2_hsotg_ep *hs_ep,
505 				struct dwc2_hsotg_req *hs_req)
506 {
507 	bool periodic = is_ep_periodic(hs_ep);
508 	u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
509 	int buf_pos = hs_req->req.actual;
510 	int to_write = hs_ep->size_loaded;
511 	void *data;
512 	int can_write;
513 	int pkt_round;
514 	int max_transfer;
515 
516 	to_write -= (buf_pos - hs_ep->last_load);
517 
518 	/* if there's nothing to write, get out early */
519 	if (to_write == 0)
520 		return 0;
521 
522 	if (periodic && !hsotg->dedicated_fifos) {
523 		u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
524 		int size_left;
525 		int size_done;
526 
527 		/*
528 		 * work out how much data was loaded so we can calculate
529 		 * how much data is left in the fifo.
530 		 */
531 
532 		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
533 
534 		/*
535 		 * if shared fifo, we cannot write anything until the
536 		 * previous data has been completely sent.
537 		 */
538 		if (hs_ep->fifo_load != 0) {
539 			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
540 			return -ENOSPC;
541 		}
542 
543 		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
544 			__func__, size_left,
545 			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
546 
547 		/* how much of the data has moved */
548 		size_done = hs_ep->size_loaded - size_left;
549 
550 		/* how much data is left in the fifo */
551 		can_write = hs_ep->fifo_load - size_done;
552 		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
553 			__func__, can_write);
554 
555 		can_write = hs_ep->fifo_size - can_write;
556 		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
557 			__func__, can_write);
558 
559 		if (can_write <= 0) {
560 			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
561 			return -ENOSPC;
562 		}
563 	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
564 		can_write = dwc2_readl(hsotg,
565 				       DTXFSTS(hs_ep->fifo_index));
566 
567 		can_write &= 0xffff;
568 		can_write *= 4;
569 	} else {
570 		if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
571 			dev_dbg(hsotg->dev,
572 				"%s: no queue slots available (0x%08x)\n",
573 				__func__, gnptxsts);
574 
575 			dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
576 			return -ENOSPC;
577 		}
578 
579 		can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
580 		can_write *= 4;	/* fifo size is in 32bit quantities. */
581 	}
582 
583 	max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
584 
585 	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
586 		__func__, gnptxsts, can_write, to_write, max_transfer);
587 
588 	/*
589 	 * limit to 512 bytes of data, it seems at least on the non-periodic
590 	 * FIFO, requests of >512 cause the endpoint to get stuck with a
591 	 * fragment of the end of the transfer in it.
592 	 */
593 	if (can_write > 512 && !periodic)
594 		can_write = 512;
595 
596 	/*
597 	 * limit the write to one max-packet size worth of data, but allow
598 	 * the transfer to return that it did not run out of fifo space
599 	 * doing it.
600 	 */
601 	if (to_write > max_transfer) {
602 		to_write = max_transfer;
603 
604 		/* it's needed only when we do not use dedicated fifos */
605 		if (!hsotg->dedicated_fifos)
606 			dwc2_hsotg_en_gsint(hsotg,
607 					    periodic ? GINTSTS_PTXFEMP :
608 					   GINTSTS_NPTXFEMP);
609 	}
610 
611 	/* see if we can write data */
612 
613 	if (to_write > can_write) {
614 		to_write = can_write;
615 		pkt_round = to_write % max_transfer;
616 
617 		/*
618 		 * Round the write down to an
619 		 * exact number of packets.
620 		 *
621 		 * Note, we do not currently check to see if we can ever
622 		 * write a full packet or not to the FIFO.
623 		 */
624 
625 		if (pkt_round)
626 			to_write -= pkt_round;
627 
628 		/*
629 		 * enable correct FIFO interrupt to alert us when there
630 		 * is more room left.
631 		 */
632 
633 		/* it's needed only when we do not use dedicated fifos */
634 		if (!hsotg->dedicated_fifos)
635 			dwc2_hsotg_en_gsint(hsotg,
636 					    periodic ? GINTSTS_PTXFEMP :
637 					   GINTSTS_NPTXFEMP);
638 	}
639 
640 	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
641 		to_write, hs_req->req.length, can_write, buf_pos);
642 
643 	if (to_write <= 0)
644 		return -ENOSPC;
645 
646 	hs_req->req.actual = buf_pos + to_write;
647 	hs_ep->total_data += to_write;
648 
649 	if (periodic)
650 		hs_ep->fifo_load += to_write;
651 
652 	to_write = DIV_ROUND_UP(to_write, 4);
653 	data = hs_req->req.buf + buf_pos;
654 
655 	dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);
656 
657 	return (to_write >= can_write) ? -ENOSPC : 0;
658 }
659 
660 /**
661  * get_ep_limit - get the maximum data legnth for this endpoint
662  * @hs_ep: The endpoint
663  *
664  * Return the maximum data that can be queued in one go on a given endpoint
665  * so that transfers that are too long can be split.
666  */
get_ep_limit(struct dwc2_hsotg_ep * hs_ep)667 static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
668 {
669 	int index = hs_ep->index;
670 	unsigned int maxsize;
671 	unsigned int maxpkt;
672 
673 	if (index != 0) {
674 		maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
675 		maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
676 	} else {
677 		maxsize = 64 + 64;
678 		if (hs_ep->dir_in)
679 			maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
680 		else
681 			maxpkt = 2;
682 	}
683 
684 	/* we made the constant loading easier above by using +1 */
685 	maxpkt--;
686 	maxsize--;
687 
688 	/*
689 	 * constrain by packet count if maxpkts*pktsize is greater
690 	 * than the length register size.
691 	 */
692 
693 	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
694 		maxsize = maxpkt * hs_ep->ep.maxpacket;
695 
696 	return maxsize;
697 }
698 
699 /**
700  * dwc2_hsotg_read_frameno - read current frame number
701  * @hsotg: The device instance
702  *
703  * Return the current frame number
704  */
dwc2_hsotg_read_frameno(struct dwc2_hsotg * hsotg)705 static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
706 {
707 	u32 dsts;
708 
709 	dsts = dwc2_readl(hsotg, DSTS);
710 	dsts &= DSTS_SOFFN_MASK;
711 	dsts >>= DSTS_SOFFN_SHIFT;
712 
713 	return dsts;
714 }
715 
716 /**
717  * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
718  * DMA descriptor chain prepared for specific endpoint
719  * @hs_ep: The endpoint
720  *
721  * Return the maximum data that can be queued in one go on a given endpoint
722  * depending on its descriptor chain capacity so that transfers that
723  * are too long can be split.
724  */
dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep * hs_ep)725 static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
726 {
727 	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
728 	int is_isoc = hs_ep->isochronous;
729 	unsigned int maxsize;
730 	u32 mps = hs_ep->ep.maxpacket;
731 	int dir_in = hs_ep->dir_in;
732 
733 	if (is_isoc)
734 		maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
735 					   DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
736 					   MAX_DMA_DESC_NUM_HS_ISOC;
737 	else
738 		maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
739 
740 	/* Interrupt OUT EP with mps not multiple of 4 */
741 	if (hs_ep->index)
742 		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
743 			maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
744 
745 	return maxsize;
746 }
747 
748 /*
749  * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
750  * @hs_ep: The endpoint
751  * @mask: RX/TX bytes mask to be defined
752  *
753  * Returns maximum data payload for one descriptor after analyzing endpoint
754  * characteristics.
755  * DMA descriptor transfer bytes limit depends on EP type:
756  * Control out - MPS,
757  * Isochronous - descriptor rx/tx bytes bitfield limit,
758  * Control In/Bulk/Interrupt - multiple of mps. This will allow to not
759  * have concatenations from various descriptors within one packet.
760  * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
761  * to a single descriptor.
762  *
763  * Selects corresponding mask for RX/TX bytes as well.
764  */
dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep * hs_ep,u32 * mask)765 static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
766 {
767 	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
768 	u32 mps = hs_ep->ep.maxpacket;
769 	int dir_in = hs_ep->dir_in;
770 	u32 desc_size = 0;
771 
772 	if (!hs_ep->index && !dir_in) {
773 		desc_size = mps;
774 		*mask = DEV_DMA_NBYTES_MASK;
775 	} else if (hs_ep->isochronous) {
776 		if (dir_in) {
777 			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
778 			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
779 		} else {
780 			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
781 			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
782 		}
783 	} else {
784 		desc_size = DEV_DMA_NBYTES_LIMIT;
785 		*mask = DEV_DMA_NBYTES_MASK;
786 
787 		/* Round down desc_size to be mps multiple */
788 		desc_size -= desc_size % mps;
789 	}
790 
791 	/* Interrupt OUT EP with mps not multiple of 4 */
792 	if (hs_ep->index)
793 		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
794 			desc_size = mps;
795 			*mask = DEV_DMA_NBYTES_MASK;
796 		}
797 
798 	return desc_size;
799 }
800 
dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep * hs_ep,struct dwc2_dma_desc ** desc,dma_addr_t dma_buff,unsigned int len,bool true_last)801 static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
802 						 struct dwc2_dma_desc **desc,
803 						 dma_addr_t dma_buff,
804 						 unsigned int len,
805 						 bool true_last)
806 {
807 	int dir_in = hs_ep->dir_in;
808 	u32 mps = hs_ep->ep.maxpacket;
809 	u32 maxsize = 0;
810 	u32 offset = 0;
811 	u32 mask = 0;
812 	int i;
813 
814 	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
815 
816 	hs_ep->desc_count = (len / maxsize) +
817 				((len % maxsize) ? 1 : 0);
818 	if (len == 0)
819 		hs_ep->desc_count = 1;
820 
821 	for (i = 0; i < hs_ep->desc_count; ++i) {
822 		(*desc)->status = 0;
823 		(*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
824 				 << DEV_DMA_BUFF_STS_SHIFT);
825 
826 		if (len > maxsize) {
827 			if (!hs_ep->index && !dir_in)
828 				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
829 
830 			(*desc)->status |=
831 				maxsize << DEV_DMA_NBYTES_SHIFT & mask;
832 			(*desc)->buf = dma_buff + offset;
833 
834 			len -= maxsize;
835 			offset += maxsize;
836 		} else {
837 			if (true_last)
838 				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
839 
840 			if (dir_in)
841 				(*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
842 					((hs_ep->send_zlp && true_last) ?
843 					DEV_DMA_SHORT : 0);
844 
845 			(*desc)->status |=
846 				len << DEV_DMA_NBYTES_SHIFT & mask;
847 			(*desc)->buf = dma_buff + offset;
848 		}
849 
850 		(*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
851 		(*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
852 				 << DEV_DMA_BUFF_STS_SHIFT);
853 		(*desc)++;
854 	}
855 }
856 
857 /*
858  * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
859  * @hs_ep: The endpoint
860  * @ureq: Request to transfer
861  * @offset: offset in bytes
862  * @len: Length of the transfer
863  *
864  * This function will iterate over descriptor chain and fill its entries
865  * with corresponding information based on transfer data.
866  */
dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep * hs_ep,dma_addr_t dma_buff,unsigned int len)867 static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
868 						 dma_addr_t dma_buff,
869 						 unsigned int len)
870 {
871 	struct usb_request *ureq = NULL;
872 	struct dwc2_dma_desc *desc = hs_ep->desc_list;
873 	struct scatterlist *sg;
874 	int i;
875 	u8 desc_count = 0;
876 
877 	if (hs_ep->req)
878 		ureq = &hs_ep->req->req;
879 
880 	/* non-DMA sg buffer */
881 	if (!ureq || !ureq->num_sgs) {
882 		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
883 			dma_buff, len, true);
884 		return;
885 	}
886 
887 	/* DMA sg buffer */
888 	for_each_sg(ureq->sg, sg, ureq->num_mapped_sgs, i) {
889 		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
890 			sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
891 			(i == (ureq->num_mapped_sgs - 1)));
892 		desc_count += hs_ep->desc_count;
893 	}
894 
895 	hs_ep->desc_count = desc_count;
896 }
897 
898 /*
899  * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
900  * @hs_ep: The isochronous endpoint.
901  * @dma_buff: usb requests dma buffer.
902  * @len: usb request transfer length.
903  *
904  * Fills next free descriptor with the data of the arrived usb request,
905  * frame info, sets Last and IOC bits increments next_desc. If filled
906  * descriptor is not the first one, removes L bit from the previous descriptor
907  * status.
908  */
dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep * hs_ep,dma_addr_t dma_buff,unsigned int len)909 static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
910 				      dma_addr_t dma_buff, unsigned int len)
911 {
912 	struct dwc2_dma_desc *desc;
913 	struct dwc2_hsotg *hsotg = hs_ep->parent;
914 	u32 index;
915 	u32 mask = 0;
916 	u8 pid = 0;
917 
918 	dwc2_gadget_get_desc_params(hs_ep, &mask);
919 
920 	index = hs_ep->next_desc;
921 	desc = &hs_ep->desc_list[index];
922 
923 	/* Check if descriptor chain full */
924 	if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
925 	    DEV_DMA_BUFF_STS_HREADY) {
926 		dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
927 		return 1;
928 	}
929 
930 	/* Clear L bit of previous desc if more than one entries in the chain */
931 	if (hs_ep->next_desc)
932 		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
933 
934 	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
935 		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
936 
937 	desc->status = 0;
938 	desc->status |= (DEV_DMA_BUFF_STS_HBUSY	<< DEV_DMA_BUFF_STS_SHIFT);
939 
940 	desc->buf = dma_buff;
941 	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
942 			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));
943 
944 	if (hs_ep->dir_in) {
945 		if (len)
946 			pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
947 		else
948 			pid = 1;
949 		desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
950 				 DEV_DMA_ISOC_PID_MASK) |
951 				((len % hs_ep->ep.maxpacket) ?
952 				 DEV_DMA_SHORT : 0) |
953 				((hs_ep->target_frame <<
954 				  DEV_DMA_ISOC_FRNUM_SHIFT) &
955 				 DEV_DMA_ISOC_FRNUM_MASK);
956 	}
957 
958 	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
959 	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
960 
961 	/* Increment frame number by interval for IN */
962 	if (hs_ep->dir_in)
963 		dwc2_gadget_incr_frame_num(hs_ep);
964 
965 	/* Update index of last configured entry in the chain */
966 	hs_ep->next_desc++;
967 	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
968 		hs_ep->next_desc = 0;
969 
970 	return 0;
971 }
972 
973 /*
974  * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
975  * @hs_ep: The isochronous endpoint.
976  *
977  * Prepare descriptor chain for isochronous endpoints. Afterwards
978  * write DMA address to HW and enable the endpoint.
979  */
dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep * hs_ep)980 static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
981 {
982 	struct dwc2_hsotg *hsotg = hs_ep->parent;
983 	struct dwc2_hsotg_req *hs_req, *treq;
984 	int index = hs_ep->index;
985 	int ret;
986 	int i;
987 	u32 dma_reg;
988 	u32 depctl;
989 	u32 ctrl;
990 	struct dwc2_dma_desc *desc;
991 
992 	if (list_empty(&hs_ep->queue)) {
993 		hs_ep->target_frame = TARGET_FRAME_INITIAL;
994 		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
995 		return;
996 	}
997 
998 	/* Initialize descriptor chain by Host Busy status */
999 	for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
1000 		desc = &hs_ep->desc_list[i];
1001 		desc->status = 0;
1002 		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
1003 				    << DEV_DMA_BUFF_STS_SHIFT);
1004 	}
1005 
1006 	hs_ep->next_desc = 0;
1007 	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
1008 		dma_addr_t dma_addr = hs_req->req.dma;
1009 
1010 		if (hs_req->req.num_sgs) {
1011 			WARN_ON(hs_req->req.num_sgs > 1);
1012 			dma_addr = sg_dma_address(hs_req->req.sg);
1013 		}
1014 		ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
1015 						 hs_req->req.length);
1016 		if (ret)
1017 			break;
1018 	}
1019 
1020 	hs_ep->compl_desc = 0;
1021 	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
1022 	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
1023 
1024 	/* write descriptor chain address to control register */
1025 	dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
1026 
1027 	ctrl = dwc2_readl(hsotg, depctl);
1028 	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
1029 	dwc2_writel(hsotg, ctrl, depctl);
1030 }
1031 
1032 static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
1033 static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
1034 					struct dwc2_hsotg_ep *hs_ep,
1035 				       struct dwc2_hsotg_req *hs_req,
1036 				       int result);
1037 
1038 /**
1039  * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
1040  * @hsotg: The controller state.
1041  * @hs_ep: The endpoint to process a request for
1042  * @hs_req: The request to start.
1043  * @continuing: True if we are doing more for the current request.
1044  *
1045  * Start the given request running by setting the endpoint registers
1046  * appropriately, and writing any data to the FIFOs.
1047  */
dwc2_hsotg_start_req(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep,struct dwc2_hsotg_req * hs_req,bool continuing)1048 static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
1049 				 struct dwc2_hsotg_ep *hs_ep,
1050 				struct dwc2_hsotg_req *hs_req,
1051 				bool continuing)
1052 {
1053 	struct usb_request *ureq = &hs_req->req;
1054 	int index = hs_ep->index;
1055 	int dir_in = hs_ep->dir_in;
1056 	u32 epctrl_reg;
1057 	u32 epsize_reg;
1058 	u32 epsize;
1059 	u32 ctrl;
1060 	unsigned int length;
1061 	unsigned int packets;
1062 	unsigned int maxreq;
1063 	unsigned int dma_reg;
1064 
1065 	if (index != 0) {
1066 		if (hs_ep->req && !continuing) {
1067 			dev_err(hsotg->dev, "%s: active request\n", __func__);
1068 			WARN_ON(1);
1069 			return;
1070 		} else if (hs_ep->req != hs_req && continuing) {
1071 			dev_err(hsotg->dev,
1072 				"%s: continue different req\n", __func__);
1073 			WARN_ON(1);
1074 			return;
1075 		}
1076 	}
1077 
1078 	dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
1079 	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
1080 	epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
1081 
1082 	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
1083 		__func__, dwc2_readl(hsotg, epctrl_reg), index,
1084 		hs_ep->dir_in ? "in" : "out");
1085 
1086 	/* If endpoint is stalled, we will restart request later */
1087 	ctrl = dwc2_readl(hsotg, epctrl_reg);
1088 
1089 	if (index && ctrl & DXEPCTL_STALL) {
1090 		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
1091 		return;
1092 	}
1093 
1094 	length = ureq->length - ureq->actual;
1095 	dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
1096 		ureq->length, ureq->actual);
1097 
1098 	if (!using_desc_dma(hsotg))
1099 		maxreq = get_ep_limit(hs_ep);
1100 	else
1101 		maxreq = dwc2_gadget_get_chain_limit(hs_ep);
1102 
1103 	if (length > maxreq) {
1104 		int round = maxreq % hs_ep->ep.maxpacket;
1105 
1106 		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
1107 			__func__, length, maxreq, round);
1108 
1109 		/* round down to multiple of packets */
1110 		if (round)
1111 			maxreq -= round;
1112 
1113 		length = maxreq;
1114 	}
1115 
1116 	if (length)
1117 		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
1118 	else
1119 		packets = 1;	/* send one packet if length is zero. */
1120 
1121 	if (dir_in && index != 0)
1122 		if (hs_ep->isochronous)
1123 			epsize = DXEPTSIZ_MC(packets);
1124 		else
1125 			epsize = DXEPTSIZ_MC(1);
1126 	else
1127 		epsize = 0;
1128 
1129 	/*
1130 	 * zero length packet should be programmed on its own and should not
1131 	 * be counted in DIEPTSIZ.PktCnt with other packets.
1132 	 */
1133 	if (dir_in && ureq->zero && !continuing) {
1134 		/* Test if zlp is actually required. */
1135 		if ((ureq->length >= hs_ep->ep.maxpacket) &&
1136 		    !(ureq->length % hs_ep->ep.maxpacket))
1137 			hs_ep->send_zlp = 1;
1138 	}
1139 
1140 	epsize |= DXEPTSIZ_PKTCNT(packets);
1141 	epsize |= DXEPTSIZ_XFERSIZE(length);
1142 
1143 	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
1144 		__func__, packets, length, ureq->length, epsize, epsize_reg);
1145 
1146 	/* store the request as the current one we're doing */
1147 	hs_ep->req = hs_req;
1148 
1149 	if (using_desc_dma(hsotg)) {
1150 		u32 offset = 0;
1151 		u32 mps = hs_ep->ep.maxpacket;
1152 
1153 		/* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
1154 		if (!dir_in) {
1155 			if (!index)
1156 				length = mps;
1157 			else if (length % mps)
1158 				length += (mps - (length % mps));
1159 		}
1160 
1161 		if (continuing)
1162 			offset = ureq->actual;
1163 
1164 		/* Fill DDMA chain entries */
1165 		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
1166 						     length);
1167 
1168 		/* write descriptor chain address to control register */
1169 		dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
1170 
1171 		dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
1172 			__func__, (u32)hs_ep->desc_list_dma, dma_reg);
1173 	} else {
1174 		/* write size / packets */
1175 		dwc2_writel(hsotg, epsize, epsize_reg);
1176 
1177 		if (using_dma(hsotg) && !continuing && (length != 0)) {
1178 			/*
1179 			 * write DMA address to control register, buffer
1180 			 * already synced by dwc2_hsotg_ep_queue().
1181 			 */
1182 
1183 			dwc2_writel(hsotg, ureq->dma, dma_reg);
1184 
1185 			dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
1186 				__func__, &ureq->dma, dma_reg);
1187 		}
1188 	}
1189 
1190 	if (hs_ep->isochronous) {
1191 		if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
1192 			if (hs_ep->interval == 1) {
1193 				if (hs_ep->target_frame & 0x1)
1194 					ctrl |= DXEPCTL_SETODDFR;
1195 				else
1196 					ctrl |= DXEPCTL_SETEVENFR;
1197 			}
1198 			ctrl |= DXEPCTL_CNAK;
1199 		} else {
1200 			hs_req->req.frame_number = hs_ep->target_frame;
1201 			hs_req->req.actual = 0;
1202 			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
1203 			return;
1204 		}
1205 	}
1206 
1207 	ctrl |= DXEPCTL_EPENA;	/* ensure ep enabled */
1208 
1209 	dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
1210 
1211 	/* For Setup request do not clear NAK */
1212 	if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
1213 		ctrl |= DXEPCTL_CNAK;	/* clear NAK set by core */
1214 
1215 	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
1216 	dwc2_writel(hsotg, ctrl, epctrl_reg);
1217 
1218 	/*
1219 	 * set these, it seems that DMA support increments past the end
1220 	 * of the packet buffer so we need to calculate the length from
1221 	 * this information.
1222 	 */
1223 	hs_ep->size_loaded = length;
1224 	hs_ep->last_load = ureq->actual;
1225 
1226 	if (dir_in && !using_dma(hsotg)) {
1227 		/* set these anyway, we may need them for non-periodic in */
1228 		hs_ep->fifo_load = 0;
1229 
1230 		dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1231 	}
1232 
1233 	/*
1234 	 * Note, trying to clear the NAK here causes problems with transmit
1235 	 * on the S3C6400 ending up with the TXFIFO becoming full.
1236 	 */
1237 
1238 	/* check ep is enabled */
1239 	if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA))
1240 		dev_dbg(hsotg->dev,
1241 			"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
1242 			 index, dwc2_readl(hsotg, epctrl_reg));
1243 
1244 	dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
1245 		__func__, dwc2_readl(hsotg, epctrl_reg));
1246 
1247 	/* enable ep interrupts */
1248 	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
1249 }
1250 
1251 /**
1252  * dwc2_hsotg_map_dma - map the DMA memory being used for the request
1253  * @hsotg: The device state.
1254  * @hs_ep: The endpoint the request is on.
1255  * @req: The request being processed.
1256  *
1257  * We've been asked to queue a request, so ensure that the memory buffer
1258  * is correctly setup for DMA. If we've been passed an extant DMA address
1259  * then ensure the buffer has been synced to memory. If our buffer has no
1260  * DMA memory, then we map the memory and mark our request to allow us to
1261  * cleanup on completion.
1262  */
dwc2_hsotg_map_dma(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep,struct usb_request * req)1263 static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
1264 			      struct dwc2_hsotg_ep *hs_ep,
1265 			     struct usb_request *req)
1266 {
1267 	int ret;
1268 
1269 	hs_ep->map_dir = hs_ep->dir_in;
1270 	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
1271 	if (ret)
1272 		goto dma_error;
1273 
1274 	return 0;
1275 
1276 dma_error:
1277 	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
1278 		__func__, req->buf, req->length);
1279 
1280 	return -EIO;
1281 }
1282 
dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep,struct dwc2_hsotg_req * hs_req)1283 static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
1284 						 struct dwc2_hsotg_ep *hs_ep,
1285 						 struct dwc2_hsotg_req *hs_req)
1286 {
1287 	void *req_buf = hs_req->req.buf;
1288 
1289 	/* If dma is not being used or buffer is aligned */
1290 	if (!using_dma(hsotg) || !((long)req_buf & 3))
1291 		return 0;
1292 
1293 	WARN_ON(hs_req->saved_req_buf);
1294 
1295 	dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
1296 		hs_ep->ep.name, req_buf, hs_req->req.length);
1297 
1298 	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
1299 	if (!hs_req->req.buf) {
1300 		hs_req->req.buf = req_buf;
1301 		dev_err(hsotg->dev,
1302 			"%s: unable to allocate memory for bounce buffer\n",
1303 			__func__);
1304 		return -ENOMEM;
1305 	}
1306 
1307 	/* Save actual buffer */
1308 	hs_req->saved_req_buf = req_buf;
1309 
1310 	if (hs_ep->dir_in)
1311 		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
1312 	return 0;
1313 }
1314 
1315 static void
dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep,struct dwc2_hsotg_req * hs_req)1316 dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
1317 					 struct dwc2_hsotg_ep *hs_ep,
1318 					 struct dwc2_hsotg_req *hs_req)
1319 {
1320 	/* If dma is not being used or buffer was aligned */
1321 	if (!using_dma(hsotg) || !hs_req->saved_req_buf)
1322 		return;
1323 
1324 	dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
1325 		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
1326 
1327 	/* Copy data from bounce buffer on successful out transfer */
1328 	if (!hs_ep->dir_in && !hs_req->req.status)
1329 		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
1330 		       hs_req->req.actual);
1331 
1332 	/* Free bounce buffer */
1333 	kfree(hs_req->req.buf);
1334 
1335 	hs_req->req.buf = hs_req->saved_req_buf;
1336 	hs_req->saved_req_buf = NULL;
1337 }
1338 
1339 /**
1340  * dwc2_gadget_target_frame_elapsed - Checks target frame
1341  * @hs_ep: The driver endpoint to check
1342  *
1343  * Returns 1 if targeted frame elapsed. If returned 1 then we need to drop
1344  * corresponding transfer.
1345  */
dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep * hs_ep)1346 static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
1347 {
1348 	struct dwc2_hsotg *hsotg = hs_ep->parent;
1349 	u32 target_frame = hs_ep->target_frame;
1350 	u32 current_frame = hsotg->frame_number;
1351 	bool frame_overrun = hs_ep->frame_overrun;
1352 	u16 limit = DSTS_SOFFN_LIMIT;
1353 
1354 	if (hsotg->gadget.speed != USB_SPEED_HIGH)
1355 		limit >>= 3;
1356 
1357 	if (!frame_overrun && current_frame >= target_frame)
1358 		return true;
1359 
1360 	if (frame_overrun && current_frame >= target_frame &&
1361 	    ((current_frame - target_frame) < limit / 2))
1362 		return true;
1363 
1364 	return false;
1365 }
1366 
1367 /*
1368  * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
1369  * @hsotg: The driver state
1370  * @hs_ep: the ep descriptor chain is for
1371  *
1372  * Called to update EP0 structure's pointers depend on stage of
1373  * control transfer.
1374  */
dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep)1375 static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
1376 					  struct dwc2_hsotg_ep *hs_ep)
1377 {
1378 	switch (hsotg->ep0_state) {
1379 	case DWC2_EP0_SETUP:
1380 	case DWC2_EP0_STATUS_OUT:
1381 		hs_ep->desc_list = hsotg->setup_desc[0];
1382 		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
1383 		break;
1384 	case DWC2_EP0_DATA_IN:
1385 	case DWC2_EP0_STATUS_IN:
1386 		hs_ep->desc_list = hsotg->ctrl_in_desc;
1387 		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
1388 		break;
1389 	case DWC2_EP0_DATA_OUT:
1390 		hs_ep->desc_list = hsotg->ctrl_out_desc;
1391 		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
1392 		break;
1393 	default:
1394 		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
1395 			hsotg->ep0_state);
1396 		return -EINVAL;
1397 	}
1398 
1399 	return 0;
1400 }
1401 
dwc2_hsotg_ep_queue(struct usb_ep * ep,struct usb_request * req,gfp_t gfp_flags)1402 static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
1403 			       gfp_t gfp_flags)
1404 {
1405 	struct dwc2_hsotg_req *hs_req = our_req(req);
1406 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1407 	struct dwc2_hsotg *hs = hs_ep->parent;
1408 	bool first;
1409 	int ret;
1410 	u32 maxsize = 0;
1411 	u32 mask = 0;
1412 
1413 
1414 	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
1415 		ep->name, req, req->length, req->buf, req->no_interrupt,
1416 		req->zero, req->short_not_ok);
1417 
1418 	if (hs->lx_state == DWC2_L1) {
1419 		dwc2_wakeup_from_lpm_l1(hs, true);
1420 	}
1421 
1422 	/* Prevent new request submission when controller is suspended */
1423 	if (hs->lx_state != DWC2_L0) {
1424 		dev_dbg(hs->dev, "%s: submit request only in active state\n",
1425 			__func__);
1426 		return -EAGAIN;
1427 	}
1428 
1429 	/* initialise status of the request */
1430 	INIT_LIST_HEAD(&hs_req->queue);
1431 	req->actual = 0;
1432 	req->status = -EINPROGRESS;
1433 
1434 	/* Don't queue ISOC request if length greater than mps*mc */
1435 	if (hs_ep->isochronous &&
1436 	    req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
1437 		dev_err(hs->dev, "req length > maxpacket*mc\n");
1438 		return -EINVAL;
1439 	}
1440 
1441 	/* In DDMA mode for ISOC's don't queue request if length greater
1442 	 * than descriptor limits.
1443 	 */
1444 	if (using_desc_dma(hs) && hs_ep->isochronous) {
1445 		maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
1446 		if (hs_ep->dir_in && req->length > maxsize) {
1447 			dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
1448 				req->length, maxsize);
1449 			return -EINVAL;
1450 		}
1451 
1452 		if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
1453 			dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
1454 				req->length, hs_ep->ep.maxpacket);
1455 			return -EINVAL;
1456 		}
1457 	}
1458 
1459 	ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
1460 	if (ret)
1461 		return ret;
1462 
1463 	/* if we're using DMA, sync the buffers as necessary */
1464 	if (using_dma(hs)) {
1465 		ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
1466 		if (ret)
1467 			return ret;
1468 	}
1469 	/* If using descriptor DMA configure EP0 descriptor chain pointers */
1470 	if (using_desc_dma(hs) && !hs_ep->index) {
1471 		ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
1472 		if (ret)
1473 			return ret;
1474 	}
1475 
1476 	first = list_empty(&hs_ep->queue);
1477 	list_add_tail(&hs_req->queue, &hs_ep->queue);
1478 
1479 	/*
1480 	 * Handle DDMA isochronous transfers separately - just add new entry
1481 	 * to the descriptor chain.
1482 	 * Transfer will be started once SW gets either one of NAK or
1483 	 * OutTknEpDis interrupts.
1484 	 */
1485 	if (using_desc_dma(hs) && hs_ep->isochronous) {
1486 		if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
1487 			dma_addr_t dma_addr = hs_req->req.dma;
1488 
1489 			if (hs_req->req.num_sgs) {
1490 				WARN_ON(hs_req->req.num_sgs > 1);
1491 				dma_addr = sg_dma_address(hs_req->req.sg);
1492 			}
1493 			dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
1494 						   hs_req->req.length);
1495 		}
1496 		return 0;
1497 	}
1498 
1499 	/* Change EP direction if status phase request is after data out */
1500 	if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
1501 	    hs->ep0_state == DWC2_EP0_DATA_OUT)
1502 		hs_ep->dir_in = 1;
1503 
1504 	if (first) {
1505 		if (!hs_ep->isochronous) {
1506 			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1507 			return 0;
1508 		}
1509 
1510 		/* Update current frame number value. */
1511 		hs->frame_number = dwc2_hsotg_read_frameno(hs);
1512 		while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
1513 			dwc2_gadget_incr_frame_num(hs_ep);
1514 			/* Update current frame number value once more as it
1515 			 * changes here.
1516 			 */
1517 			hs->frame_number = dwc2_hsotg_read_frameno(hs);
1518 		}
1519 
1520 		if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
1521 			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1522 	}
1523 	return 0;
1524 }
1525 
dwc2_hsotg_ep_queue_lock(struct usb_ep * ep,struct usb_request * req,gfp_t gfp_flags)1526 static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
1527 				    gfp_t gfp_flags)
1528 {
1529 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1530 	struct dwc2_hsotg *hs = hs_ep->parent;
1531 	unsigned long flags;
1532 	int ret;
1533 
1534 	spin_lock_irqsave(&hs->lock, flags);
1535 	ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
1536 	spin_unlock_irqrestore(&hs->lock, flags);
1537 
1538 	return ret;
1539 }
1540 
dwc2_hsotg_ep_free_request(struct usb_ep * ep,struct usb_request * req)1541 static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
1542 				       struct usb_request *req)
1543 {
1544 	struct dwc2_hsotg_req *hs_req = our_req(req);
1545 
1546 	kfree(hs_req);
1547 }
1548 
1549 /**
1550  * dwc2_hsotg_complete_oursetup - setup completion callback
1551  * @ep: The endpoint the request was on.
1552  * @req: The request completed.
1553  *
1554  * Called on completion of any requests the driver itself
1555  * submitted that need cleaning up.
1556  */
dwc2_hsotg_complete_oursetup(struct usb_ep * ep,struct usb_request * req)1557 static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
1558 					 struct usb_request *req)
1559 {
1560 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1561 	struct dwc2_hsotg *hsotg = hs_ep->parent;
1562 
1563 	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
1564 
1565 	dwc2_hsotg_ep_free_request(ep, req);
1566 }
1567 
1568 /**
1569  * ep_from_windex - convert control wIndex value to endpoint
1570  * @hsotg: The driver state.
1571  * @windex: The control request wIndex field (in host order).
1572  *
1573  * Convert the given wIndex into a pointer to an driver endpoint
1574  * structure, or return NULL if it is not a valid endpoint.
1575  */
ep_from_windex(struct dwc2_hsotg * hsotg,u32 windex)1576 static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
1577 					    u32 windex)
1578 {
1579 	int dir = (windex & USB_DIR_IN) ? 1 : 0;
1580 	int idx = windex & 0x7F;
1581 
1582 	if (windex >= 0x100)
1583 		return NULL;
1584 
1585 	if (idx > hsotg->num_of_eps)
1586 		return NULL;
1587 
1588 	return index_to_ep(hsotg, idx, dir);
1589 }
1590 
1591 /**
1592  * dwc2_hsotg_set_test_mode - Enable usb Test Modes
1593  * @hsotg: The driver state.
1594  * @testmode: requested usb test mode
1595  * Enable usb Test Mode requested by the Host.
1596  */
dwc2_hsotg_set_test_mode(struct dwc2_hsotg * hsotg,int testmode)1597 int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
1598 {
1599 	int dctl = dwc2_readl(hsotg, DCTL);
1600 
1601 	dctl &= ~DCTL_TSTCTL_MASK;
1602 	switch (testmode) {
1603 	case USB_TEST_J:
1604 	case USB_TEST_K:
1605 	case USB_TEST_SE0_NAK:
1606 	case USB_TEST_PACKET:
1607 	case USB_TEST_FORCE_ENABLE:
1608 		dctl |= testmode << DCTL_TSTCTL_SHIFT;
1609 		break;
1610 	default:
1611 		return -EINVAL;
1612 	}
1613 	dwc2_writel(hsotg, dctl, DCTL);
1614 	return 0;
1615 }
1616 
1617 /**
1618  * dwc2_hsotg_send_reply - send reply to control request
1619  * @hsotg: The device state
1620  * @ep: Endpoint 0
1621  * @buff: Buffer for request
1622  * @length: Length of reply.
1623  *
1624  * Create a request and queue it on the given endpoint. This is useful as
1625  * an internal method of sending replies to certain control requests, etc.
1626  */
dwc2_hsotg_send_reply(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * ep,void * buff,int length)1627 static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
1628 				 struct dwc2_hsotg_ep *ep,
1629 				void *buff,
1630 				int length)
1631 {
1632 	struct usb_request *req;
1633 	int ret;
1634 
1635 	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
1636 
1637 	req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
1638 	hsotg->ep0_reply = req;
1639 	if (!req) {
1640 		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
1641 		return -ENOMEM;
1642 	}
1643 
1644 	req->buf = hsotg->ep0_buff;
1645 	req->length = length;
1646 	/*
1647 	 * zero flag is for sending zlp in DATA IN stage. It has no impact on
1648 	 * STATUS stage.
1649 	 */
1650 	req->zero = 0;
1651 	req->complete = dwc2_hsotg_complete_oursetup;
1652 
1653 	if (length)
1654 		memcpy(req->buf, buff, length);
1655 
1656 	ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
1657 	if (ret) {
1658 		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
1659 		return ret;
1660 	}
1661 
1662 	return 0;
1663 }
1664 
1665 /**
1666  * dwc2_hsotg_process_req_status - process request GET_STATUS
1667  * @hsotg: The device state
1668  * @ctrl: USB control request
1669  */
dwc2_hsotg_process_req_status(struct dwc2_hsotg * hsotg,struct usb_ctrlrequest * ctrl)1670 static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
1671 					 struct usb_ctrlrequest *ctrl)
1672 {
1673 	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1674 	struct dwc2_hsotg_ep *ep;
1675 	__le16 reply;
1676 	u16 status;
1677 	int ret;
1678 
1679 	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
1680 
1681 	if (!ep0->dir_in) {
1682 		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
1683 		return -EINVAL;
1684 	}
1685 
1686 	switch (ctrl->bRequestType & USB_RECIP_MASK) {
1687 	case USB_RECIP_DEVICE:
1688 		status = hsotg->gadget.is_selfpowered <<
1689 			 USB_DEVICE_SELF_POWERED;
1690 		status |= hsotg->remote_wakeup_allowed <<
1691 			  USB_DEVICE_REMOTE_WAKEUP;
1692 		reply = cpu_to_le16(status);
1693 		break;
1694 
1695 	case USB_RECIP_INTERFACE:
1696 		/* currently, the data result should be zero */
1697 		reply = cpu_to_le16(0);
1698 		break;
1699 
1700 	case USB_RECIP_ENDPOINT:
1701 		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
1702 		if (!ep)
1703 			return -ENOENT;
1704 
1705 		reply = cpu_to_le16(ep->halted ? 1 : 0);
1706 		break;
1707 
1708 	default:
1709 		return 0;
1710 	}
1711 
1712 	if (le16_to_cpu(ctrl->wLength) != 2)
1713 		return -EINVAL;
1714 
1715 	ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
1716 	if (ret) {
1717 		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
1718 		return ret;
1719 	}
1720 
1721 	return 1;
1722 }
1723 
1724 static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
1725 
1726 /**
1727  * get_ep_head - return the first request on the endpoint
1728  * @hs_ep: The controller endpoint to get
1729  *
1730  * Get the first request on the endpoint.
1731  */
get_ep_head(struct dwc2_hsotg_ep * hs_ep)1732 static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
1733 {
1734 	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
1735 					queue);
1736 }
1737 
1738 /**
1739  * dwc2_gadget_start_next_request - Starts next request from ep queue
1740  * @hs_ep: Endpoint structure
1741  *
1742  * If queue is empty and EP is ISOC-OUT - unmasks OUTTKNEPDIS which is masked
1743  * in its handler. Hence we need to unmask it here to be able to do
1744  * resynchronization.
1745  */
dwc2_gadget_start_next_request(struct dwc2_hsotg_ep * hs_ep)1746 static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
1747 {
1748 	struct dwc2_hsotg *hsotg = hs_ep->parent;
1749 	int dir_in = hs_ep->dir_in;
1750 	struct dwc2_hsotg_req *hs_req;
1751 
1752 	if (!list_empty(&hs_ep->queue)) {
1753 		hs_req = get_ep_head(hs_ep);
1754 		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1755 		return;
1756 	}
1757 	if (!hs_ep->isochronous)
1758 		return;
1759 
1760 	if (dir_in) {
1761 		dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
1762 			__func__);
1763 	} else {
1764 		dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
1765 			__func__);
1766 	}
1767 }
1768 
1769 /**
1770  * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
1771  * @hsotg: The device state
1772  * @ctrl: USB control request
1773  */
dwc2_hsotg_process_req_feature(struct dwc2_hsotg * hsotg,struct usb_ctrlrequest * ctrl)1774 static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
1775 					  struct usb_ctrlrequest *ctrl)
1776 {
1777 	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1778 	struct dwc2_hsotg_req *hs_req;
1779 	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
1780 	struct dwc2_hsotg_ep *ep;
1781 	int ret;
1782 	bool halted;
1783 	u32 recip;
1784 	u32 wValue;
1785 	u32 wIndex;
1786 
1787 	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
1788 		__func__, set ? "SET" : "CLEAR");
1789 
1790 	wValue = le16_to_cpu(ctrl->wValue);
1791 	wIndex = le16_to_cpu(ctrl->wIndex);
1792 	recip = ctrl->bRequestType & USB_RECIP_MASK;
1793 
1794 	switch (recip) {
1795 	case USB_RECIP_DEVICE:
1796 		switch (wValue) {
1797 		case USB_DEVICE_REMOTE_WAKEUP:
1798 			if (set)
1799 				hsotg->remote_wakeup_allowed = 1;
1800 			else
1801 				hsotg->remote_wakeup_allowed = 0;
1802 			break;
1803 
1804 		case USB_DEVICE_TEST_MODE:
1805 			if ((wIndex & 0xff) != 0)
1806 				return -EINVAL;
1807 			if (!set)
1808 				return -EINVAL;
1809 
1810 			hsotg->test_mode = wIndex >> 8;
1811 			break;
1812 		default:
1813 			return -ENOENT;
1814 		}
1815 
1816 		ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1817 		if (ret) {
1818 			dev_err(hsotg->dev,
1819 				"%s: failed to send reply\n", __func__);
1820 			return ret;
1821 		}
1822 		break;
1823 
1824 	case USB_RECIP_ENDPOINT:
1825 		ep = ep_from_windex(hsotg, wIndex);
1826 		if (!ep) {
1827 			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
1828 				__func__, wIndex);
1829 			return -ENOENT;
1830 		}
1831 
1832 		switch (wValue) {
1833 		case USB_ENDPOINT_HALT:
1834 			halted = ep->halted;
1835 
1836 			if (!ep->wedged)
1837 				dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
1838 
1839 			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1840 			if (ret) {
1841 				dev_err(hsotg->dev,
1842 					"%s: failed to send reply\n", __func__);
1843 				return ret;
1844 			}
1845 
1846 			/*
1847 			 * we have to complete all requests for ep if it was
1848 			 * halted, and the halt was cleared by CLEAR_FEATURE
1849 			 */
1850 
1851 			if (!set && halted) {
1852 				/*
1853 				 * If we have request in progress,
1854 				 * then complete it
1855 				 */
1856 				if (ep->req) {
1857 					hs_req = ep->req;
1858 					ep->req = NULL;
1859 					list_del_init(&hs_req->queue);
1860 					if (hs_req->req.complete) {
1861 						spin_unlock(&hsotg->lock);
1862 						usb_gadget_giveback_request(
1863 							&ep->ep, &hs_req->req);
1864 						spin_lock(&hsotg->lock);
1865 					}
1866 				}
1867 
1868 				/* If we have pending request, then start it */
1869 				if (!ep->req)
1870 					dwc2_gadget_start_next_request(ep);
1871 			}
1872 
1873 			break;
1874 
1875 		default:
1876 			return -ENOENT;
1877 		}
1878 		break;
1879 	default:
1880 		return -ENOENT;
1881 	}
1882 	return 1;
1883 }
1884 
1885 static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
1886 
1887 /**
1888  * dwc2_hsotg_stall_ep0 - stall ep0
1889  * @hsotg: The device state
1890  *
1891  * Set stall for ep0 as response for setup request.
1892  */
dwc2_hsotg_stall_ep0(struct dwc2_hsotg * hsotg)1893 static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
1894 {
1895 	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1896 	u32 reg;
1897 	u32 ctrl;
1898 
1899 	dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
1900 	reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
1901 
1902 	/*
1903 	 * DxEPCTL_Stall will be cleared by EP once it has
1904 	 * taken effect, so no need to clear later.
1905 	 */
1906 
1907 	ctrl = dwc2_readl(hsotg, reg);
1908 	ctrl |= DXEPCTL_STALL;
1909 	ctrl |= DXEPCTL_CNAK;
1910 	dwc2_writel(hsotg, ctrl, reg);
1911 
1912 	dev_dbg(hsotg->dev,
1913 		"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
1914 		ctrl, reg, dwc2_readl(hsotg, reg));
1915 
1916 	 /*
1917 	  * complete won't be called, so we enqueue
1918 	  * setup request here
1919 	  */
1920 	 dwc2_hsotg_enqueue_setup(hsotg);
1921 }
1922 
1923 /**
1924  * dwc2_hsotg_process_control - process a control request
1925  * @hsotg: The device state
1926  * @ctrl: The control request received
1927  *
1928  * The controller has received the SETUP phase of a control request, and
1929  * needs to work out what to do next (and whether to pass it on to the
1930  * gadget driver).
1931  */
dwc2_hsotg_process_control(struct dwc2_hsotg * hsotg,struct usb_ctrlrequest * ctrl)1932 static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
1933 				       struct usb_ctrlrequest *ctrl)
1934 {
1935 	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1936 	int ret = 0;
1937 	u32 dcfg;
1938 
1939 	dev_dbg(hsotg->dev,
1940 		"ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
1941 		ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
1942 		ctrl->wIndex, ctrl->wLength);
1943 
1944 	if (ctrl->wLength == 0) {
1945 		ep0->dir_in = 1;
1946 		hsotg->ep0_state = DWC2_EP0_STATUS_IN;
1947 	} else if (ctrl->bRequestType & USB_DIR_IN) {
1948 		ep0->dir_in = 1;
1949 		hsotg->ep0_state = DWC2_EP0_DATA_IN;
1950 	} else {
1951 		ep0->dir_in = 0;
1952 		hsotg->ep0_state = DWC2_EP0_DATA_OUT;
1953 	}
1954 
1955 	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1956 		switch (ctrl->bRequest) {
1957 		case USB_REQ_SET_ADDRESS:
1958 			hsotg->connected = 1;
1959 			dcfg = dwc2_readl(hsotg, DCFG);
1960 			dcfg &= ~DCFG_DEVADDR_MASK;
1961 			dcfg |= (le16_to_cpu(ctrl->wValue) <<
1962 				 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
1963 			dwc2_writel(hsotg, dcfg, DCFG);
1964 
1965 			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1966 
1967 			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1968 			return;
1969 
1970 		case USB_REQ_GET_STATUS:
1971 			ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
1972 			break;
1973 
1974 		case USB_REQ_CLEAR_FEATURE:
1975 		case USB_REQ_SET_FEATURE:
1976 			ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
1977 			break;
1978 		}
1979 	}
1980 
1981 	/* as a fallback, try delivering it to the driver to deal with */
1982 
1983 	if (ret == 0 && hsotg->driver) {
1984 		spin_unlock(&hsotg->lock);
1985 		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1986 		spin_lock(&hsotg->lock);
1987 		if (ret < 0)
1988 			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1989 	}
1990 
1991 	hsotg->delayed_status = false;
1992 	if (ret == USB_GADGET_DELAYED_STATUS)
1993 		hsotg->delayed_status = true;
1994 
1995 	/*
1996 	 * the request is either unhandlable, or is not formatted correctly
1997 	 * so respond with a STALL for the status stage to indicate failure.
1998 	 */
1999 
2000 	if (ret < 0)
2001 		dwc2_hsotg_stall_ep0(hsotg);
2002 }
2003 
2004 /**
2005  * dwc2_hsotg_complete_setup - completion of a setup transfer
2006  * @ep: The endpoint the request was on.
2007  * @req: The request completed.
2008  *
2009  * Called on completion of any requests the driver itself submitted for
2010  * EP0 setup packets
2011  */
dwc2_hsotg_complete_setup(struct usb_ep * ep,struct usb_request * req)2012 static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
2013 				      struct usb_request *req)
2014 {
2015 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
2016 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2017 
2018 	if (req->status < 0) {
2019 		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
2020 		return;
2021 	}
2022 
2023 	spin_lock(&hsotg->lock);
2024 	if (req->actual == 0)
2025 		dwc2_hsotg_enqueue_setup(hsotg);
2026 	else
2027 		dwc2_hsotg_process_control(hsotg, req->buf);
2028 	spin_unlock(&hsotg->lock);
2029 }
2030 
2031 /**
2032  * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
2033  * @hsotg: The device state.
2034  *
2035  * Enqueue a request on EP0 if necessary to received any SETUP packets
2036  * received from the host.
2037  */
dwc2_hsotg_enqueue_setup(struct dwc2_hsotg * hsotg)2038 static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
2039 {
2040 	struct usb_request *req = hsotg->ctrl_req;
2041 	struct dwc2_hsotg_req *hs_req = our_req(req);
2042 	int ret;
2043 
2044 	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
2045 
2046 	req->zero = 0;
2047 	req->length = 8;
2048 	req->buf = hsotg->ctrl_buff;
2049 	req->complete = dwc2_hsotg_complete_setup;
2050 
2051 	if (!list_empty(&hs_req->queue)) {
2052 		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
2053 		return;
2054 	}
2055 
2056 	hsotg->eps_out[0]->dir_in = 0;
2057 	hsotg->eps_out[0]->send_zlp = 0;
2058 	hsotg->ep0_state = DWC2_EP0_SETUP;
2059 
2060 	ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
2061 	if (ret < 0) {
2062 		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
2063 		/*
2064 		 * Don't think there's much we can do other than watch the
2065 		 * driver fail.
2066 		 */
2067 	}
2068 }
2069 
dwc2_hsotg_program_zlp(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep)2070 static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
2071 				   struct dwc2_hsotg_ep *hs_ep)
2072 {
2073 	u32 ctrl;
2074 	u8 index = hs_ep->index;
2075 	u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
2076 	u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
2077 
2078 	if (hs_ep->dir_in)
2079 		dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
2080 			index);
2081 	else
2082 		dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
2083 			index);
2084 	if (using_desc_dma(hsotg)) {
2085 		/* Not specific buffer needed for ep0 ZLP */
2086 		dma_addr_t dma = hs_ep->desc_list_dma;
2087 
2088 		if (!index)
2089 			dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
2090 
2091 		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
2092 	} else {
2093 		dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
2094 			    DXEPTSIZ_XFERSIZE(0),
2095 			    epsiz_reg);
2096 	}
2097 
2098 	ctrl = dwc2_readl(hsotg, epctl_reg);
2099 	ctrl |= DXEPCTL_CNAK;  /* clear NAK set by core */
2100 	ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
2101 	ctrl |= DXEPCTL_USBACTEP;
2102 	dwc2_writel(hsotg, ctrl, epctl_reg);
2103 }
2104 
2105 /**
2106  * dwc2_hsotg_complete_request - complete a request given to us
2107  * @hsotg: The device state.
2108  * @hs_ep: The endpoint the request was on.
2109  * @hs_req: The request to complete.
2110  * @result: The result code (0 => Ok, otherwise errno)
2111  *
2112  * The given request has finished, so call the necessary completion
2113  * if it has one and then look to see if we can start a new request
2114  * on the endpoint.
2115  *
2116  * Note, expects the ep to already be locked as appropriate.
2117  */
dwc2_hsotg_complete_request(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep,struct dwc2_hsotg_req * hs_req,int result)2118 static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
2119 					struct dwc2_hsotg_ep *hs_ep,
2120 				       struct dwc2_hsotg_req *hs_req,
2121 				       int result)
2122 {
2123 	if (!hs_req) {
2124 		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
2125 		return;
2126 	}
2127 
2128 	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
2129 		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
2130 
2131 	/*
2132 	 * only replace the status if we've not already set an error
2133 	 * from a previous transaction
2134 	 */
2135 
2136 	if (hs_req->req.status == -EINPROGRESS)
2137 		hs_req->req.status = result;
2138 
2139 	if (using_dma(hsotg))
2140 		dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
2141 
2142 	dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
2143 
2144 	hs_ep->req = NULL;
2145 	list_del_init(&hs_req->queue);
2146 
2147 	/*
2148 	 * call the complete request with the locks off, just in case the
2149 	 * request tries to queue more work for this endpoint.
2150 	 */
2151 
2152 	if (hs_req->req.complete) {
2153 		spin_unlock(&hsotg->lock);
2154 		usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
2155 		spin_lock(&hsotg->lock);
2156 	}
2157 
2158 	/* In DDMA don't need to proceed to starting of next ISOC request */
2159 	if (using_desc_dma(hsotg) && hs_ep->isochronous)
2160 		return;
2161 
2162 	/*
2163 	 * Look to see if there is anything else to do. Note, the completion
2164 	 * of the previous request may have caused a new request to be started
2165 	 * so be careful when doing this.
2166 	 */
2167 
2168 	if (!hs_ep->req && result >= 0)
2169 		dwc2_gadget_start_next_request(hs_ep);
2170 }
2171 
2172 /*
2173  * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
2174  * @hs_ep: The endpoint the request was on.
2175  *
2176  * Get first request from the ep queue, determine descriptor on which complete
2177  * happened. SW discovers which descriptor currently in use by HW, adjusts
2178  * dma_address and calculates index of completed descriptor based on the value
2179  * of DEPDMA register. Update actual length of request, giveback to gadget.
2180  */
dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep * hs_ep)2181 static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
2182 {
2183 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2184 	struct dwc2_hsotg_req *hs_req;
2185 	struct usb_request *ureq;
2186 	u32 desc_sts;
2187 	u32 mask;
2188 
2189 	desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2190 
2191 	/* Process only descriptors with buffer status set to DMA done */
2192 	while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >>
2193 		DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) {
2194 
2195 		hs_req = get_ep_head(hs_ep);
2196 		if (!hs_req) {
2197 			dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
2198 			return;
2199 		}
2200 		ureq = &hs_req->req;
2201 
2202 		/* Check completion status */
2203 		if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT ==
2204 			DEV_DMA_STS_SUCC) {
2205 			mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
2206 				DEV_DMA_ISOC_RX_NBYTES_MASK;
2207 			ureq->actual = ureq->length - ((desc_sts & mask) >>
2208 				DEV_DMA_ISOC_NBYTES_SHIFT);
2209 
2210 			/* Adjust actual len for ISOC Out if len is
2211 			 * not align of 4
2212 			 */
2213 			if (!hs_ep->dir_in && ureq->length & 0x3)
2214 				ureq->actual += 4 - (ureq->length & 0x3);
2215 
2216 			/* Set actual frame number for completed transfers */
2217 			ureq->frame_number =
2218 				(desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >>
2219 				DEV_DMA_ISOC_FRNUM_SHIFT;
2220 		}
2221 
2222 		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2223 
2224 		hs_ep->compl_desc++;
2225 		if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
2226 			hs_ep->compl_desc = 0;
2227 		desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2228 	}
2229 }
2230 
2231 /*
2232  * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
2233  * @hs_ep: The isochronous endpoint.
2234  *
2235  * If EP ISOC OUT then need to flush RX FIFO to remove source of BNA
2236  * interrupt. Reset target frame and next_desc to allow to start
2237  * ISOC's on NAK interrupt for IN direction or on OUTTKNEPDIS
2238  * interrupt for OUT direction.
2239  */
dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep * hs_ep)2240 static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
2241 {
2242 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2243 
2244 	if (!hs_ep->dir_in)
2245 		dwc2_flush_rx_fifo(hsotg);
2246 	dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);
2247 
2248 	hs_ep->target_frame = TARGET_FRAME_INITIAL;
2249 	hs_ep->next_desc = 0;
2250 	hs_ep->compl_desc = 0;
2251 }
2252 
2253 /**
2254  * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
2255  * @hsotg: The device state.
2256  * @ep_idx: The endpoint index for the data
2257  * @size: The size of data in the fifo, in bytes
2258  *
2259  * The FIFO status shows there is data to read from the FIFO for a given
2260  * endpoint, so sort out whether we need to read the data into a request
2261  * that has been made for that endpoint.
2262  */
dwc2_hsotg_rx_data(struct dwc2_hsotg * hsotg,int ep_idx,int size)2263 static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
2264 {
2265 	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
2266 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2267 	int to_read;
2268 	int max_req;
2269 	int read_ptr;
2270 
2271 	if (!hs_req) {
2272 		u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
2273 		int ptr;
2274 
2275 		dev_dbg(hsotg->dev,
2276 			"%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
2277 			 __func__, size, ep_idx, epctl);
2278 
2279 		/* dump the data from the FIFO, we've nothing we can do */
2280 		for (ptr = 0; ptr < size; ptr += 4)
2281 			(void)dwc2_readl(hsotg, EPFIFO(ep_idx));
2282 
2283 		return;
2284 	}
2285 
2286 	to_read = size;
2287 	read_ptr = hs_req->req.actual;
2288 	max_req = hs_req->req.length - read_ptr;
2289 
2290 	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
2291 		__func__, to_read, max_req, read_ptr, hs_req->req.length);
2292 
2293 	if (to_read > max_req) {
2294 		/*
2295 		 * more data appeared than we where willing
2296 		 * to deal with in this request.
2297 		 */
2298 
2299 		/* currently we don't deal this */
2300 		WARN_ON_ONCE(1);
2301 	}
2302 
2303 	hs_ep->total_data += to_read;
2304 	hs_req->req.actual += to_read;
2305 	to_read = DIV_ROUND_UP(to_read, 4);
2306 
2307 	/*
2308 	 * note, we might over-write the buffer end by 3 bytes depending on
2309 	 * alignment of the data.
2310 	 */
2311 	dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
2312 		       hs_req->req.buf + read_ptr, to_read);
2313 }
2314 
2315 /**
2316  * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
2317  * @hsotg: The device instance
2318  * @dir_in: If IN zlp
2319  *
2320  * Generate a zero-length IN packet request for terminating a SETUP
2321  * transaction.
2322  *
2323  * Note, since we don't write any data to the TxFIFO, then it is
2324  * currently believed that we do not need to wait for any space in
2325  * the TxFIFO.
2326  */
dwc2_hsotg_ep0_zlp(struct dwc2_hsotg * hsotg,bool dir_in)2327 static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
2328 {
2329 	/* eps_out[0] is used in both directions */
2330 	hsotg->eps_out[0]->dir_in = dir_in;
2331 	hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
2332 
2333 	dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
2334 }
2335 
2336 /*
2337  * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
2338  * @hs_ep - The endpoint on which transfer went
2339  *
2340  * Iterate over endpoints descriptor chain and get info on bytes remained
2341  * in DMA descriptors after transfer has completed. Used for non isoc EPs.
2342  */
dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep * hs_ep)2343 static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
2344 {
2345 	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
2346 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2347 	unsigned int bytes_rem = 0;
2348 	unsigned int bytes_rem_correction = 0;
2349 	struct dwc2_dma_desc *desc = hs_ep->desc_list;
2350 	int i;
2351 	u32 status;
2352 	u32 mps = hs_ep->ep.maxpacket;
2353 	int dir_in = hs_ep->dir_in;
2354 
2355 	if (!desc)
2356 		return -EINVAL;
2357 
2358 	/* Interrupt OUT EP with mps not multiple of 4 */
2359 	if (hs_ep->index)
2360 		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
2361 			bytes_rem_correction = 4 - (mps % 4);
2362 
2363 	for (i = 0; i < hs_ep->desc_count; ++i) {
2364 		status = desc->status;
2365 		bytes_rem += status & DEV_DMA_NBYTES_MASK;
2366 		bytes_rem -= bytes_rem_correction;
2367 
2368 		if (status & DEV_DMA_STS_MASK)
2369 			dev_err(hsotg->dev, "descriptor %d closed with %x\n",
2370 				i, status & DEV_DMA_STS_MASK);
2371 
2372 		if (status & DEV_DMA_L)
2373 			break;
2374 
2375 		desc++;
2376 	}
2377 
2378 	return bytes_rem;
2379 }
2380 
2381 /**
2382  * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
2383  * @hsotg: The device instance
2384  * @epnum: The endpoint received from
2385  *
2386  * The RXFIFO has delivered an OutDone event, which means that the data
2387  * transfer for an OUT endpoint has been completed, either by a short
2388  * packet or by the finish of a transfer.
2389  */
dwc2_hsotg_handle_outdone(struct dwc2_hsotg * hsotg,int epnum)2390 static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
2391 {
2392 	u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
2393 	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
2394 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2395 	struct usb_request *req = &hs_req->req;
2396 	unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2397 	int result = 0;
2398 
2399 	if (!hs_req) {
2400 		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
2401 		return;
2402 	}
2403 
2404 	if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
2405 		dev_dbg(hsotg->dev, "zlp packet received\n");
2406 		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2407 		dwc2_hsotg_enqueue_setup(hsotg);
2408 		return;
2409 	}
2410 
2411 	if (using_desc_dma(hsotg))
2412 		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2413 
2414 	if (using_dma(hsotg)) {
2415 		unsigned int size_done;
2416 
2417 		/*
2418 		 * Calculate the size of the transfer by checking how much
2419 		 * is left in the endpoint size register and then working it
2420 		 * out from the amount we loaded for the transfer.
2421 		 *
2422 		 * We need to do this as DMA pointers are always 32bit aligned
2423 		 * so may overshoot/undershoot the transfer.
2424 		 */
2425 
2426 		size_done = hs_ep->size_loaded - size_left;
2427 		size_done += hs_ep->last_load;
2428 
2429 		req->actual = size_done;
2430 	}
2431 
2432 	/* if there is more request to do, schedule new transfer */
2433 	if (req->actual < req->length && size_left == 0) {
2434 		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2435 		return;
2436 	}
2437 
2438 	if (req->actual < req->length && req->short_not_ok) {
2439 		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
2440 			__func__, req->actual, req->length);
2441 
2442 		/*
2443 		 * todo - what should we return here? there's no one else
2444 		 * even bothering to check the status.
2445 		 */
2446 	}
2447 
2448 	/* DDMA IN status phase will start from StsPhseRcvd interrupt */
2449 	if (!using_desc_dma(hsotg) && epnum == 0 &&
2450 	    hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
2451 		/* Move to STATUS IN */
2452 		if (!hsotg->delayed_status)
2453 			dwc2_hsotg_ep0_zlp(hsotg, true);
2454 	}
2455 
2456 	/* Set actual frame number for completed transfers */
2457 	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
2458 		req->frame_number = hs_ep->target_frame;
2459 		dwc2_gadget_incr_frame_num(hs_ep);
2460 	}
2461 
2462 	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
2463 }
2464 
2465 /**
2466  * dwc2_hsotg_handle_rx - RX FIFO has data
2467  * @hsotg: The device instance
2468  *
2469  * The IRQ handler has detected that the RX FIFO has some data in it
2470  * that requires processing, so find out what is in there and do the
2471  * appropriate read.
2472  *
2473  * The RXFIFO is a true FIFO, the packets coming out are still in packet
2474  * chunks, so if you have x packets received on an endpoint you'll get x
2475  * FIFO events delivered, each with a packet's worth of data in it.
2476  *
2477  * When using DMA, we should not be processing events from the RXFIFO
2478  * as the actual data should be sent to the memory directly and we turn
2479  * on the completion interrupts to get notifications of transfer completion.
2480  */
dwc2_hsotg_handle_rx(struct dwc2_hsotg * hsotg)2481 static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
2482 {
2483 	u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
2484 	u32 epnum, status, size;
2485 
2486 	WARN_ON(using_dma(hsotg));
2487 
2488 	epnum = grxstsr & GRXSTS_EPNUM_MASK;
2489 	status = grxstsr & GRXSTS_PKTSTS_MASK;
2490 
2491 	size = grxstsr & GRXSTS_BYTECNT_MASK;
2492 	size >>= GRXSTS_BYTECNT_SHIFT;
2493 
2494 	dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
2495 		__func__, grxstsr, size, epnum);
2496 
2497 	switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
2498 	case GRXSTS_PKTSTS_GLOBALOUTNAK:
2499 		dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
2500 		break;
2501 
2502 	case GRXSTS_PKTSTS_OUTDONE:
2503 		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
2504 			dwc2_hsotg_read_frameno(hsotg));
2505 
2506 		if (!using_dma(hsotg))
2507 			dwc2_hsotg_handle_outdone(hsotg, epnum);
2508 		break;
2509 
2510 	case GRXSTS_PKTSTS_SETUPDONE:
2511 		dev_dbg(hsotg->dev,
2512 			"SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
2513 			dwc2_hsotg_read_frameno(hsotg),
2514 			dwc2_readl(hsotg, DOEPCTL(0)));
2515 		/*
2516 		 * Call dwc2_hsotg_handle_outdone here if it was not called from
2517 		 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
2518 		 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
2519 		 */
2520 		if (hsotg->ep0_state == DWC2_EP0_SETUP)
2521 			dwc2_hsotg_handle_outdone(hsotg, epnum);
2522 		break;
2523 
2524 	case GRXSTS_PKTSTS_OUTRX:
2525 		dwc2_hsotg_rx_data(hsotg, epnum, size);
2526 		break;
2527 
2528 	case GRXSTS_PKTSTS_SETUPRX:
2529 		dev_dbg(hsotg->dev,
2530 			"SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
2531 			dwc2_hsotg_read_frameno(hsotg),
2532 			dwc2_readl(hsotg, DOEPCTL(0)));
2533 
2534 		WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
2535 
2536 		dwc2_hsotg_rx_data(hsotg, epnum, size);
2537 		break;
2538 
2539 	default:
2540 		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
2541 			 __func__, grxstsr);
2542 
2543 		dwc2_hsotg_dump(hsotg);
2544 		break;
2545 	}
2546 }
2547 
2548 /**
2549  * dwc2_hsotg_ep0_mps - turn max packet size into register setting
2550  * @mps: The maximum packet size in bytes.
2551  */
dwc2_hsotg_ep0_mps(unsigned int mps)2552 static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
2553 {
2554 	switch (mps) {
2555 	case 64:
2556 		return D0EPCTL_MPS_64;
2557 	case 32:
2558 		return D0EPCTL_MPS_32;
2559 	case 16:
2560 		return D0EPCTL_MPS_16;
2561 	case 8:
2562 		return D0EPCTL_MPS_8;
2563 	}
2564 
2565 	/* bad max packet size, warn and return invalid result */
2566 	WARN_ON(1);
2567 	return (u32)-1;
2568 }
2569 
2570 /**
2571  * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
2572  * @hsotg: The driver state.
2573  * @ep: The index number of the endpoint
2574  * @mps: The maximum packet size in bytes
2575  * @mc: The multicount value
2576  * @dir_in: True if direction is in.
2577  *
2578  * Configure the maximum packet size for the given endpoint, updating
2579  * the hardware control registers to reflect this.
2580  */
dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg * hsotg,unsigned int ep,unsigned int mps,unsigned int mc,unsigned int dir_in)2581 static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
2582 					unsigned int ep, unsigned int mps,
2583 					unsigned int mc, unsigned int dir_in)
2584 {
2585 	struct dwc2_hsotg_ep *hs_ep;
2586 	u32 reg;
2587 
2588 	hs_ep = index_to_ep(hsotg, ep, dir_in);
2589 	if (!hs_ep)
2590 		return;
2591 
2592 	if (ep == 0) {
2593 		u32 mps_bytes = mps;
2594 
2595 		/* EP0 is a special case */
2596 		mps = dwc2_hsotg_ep0_mps(mps_bytes);
2597 		if (mps > 3)
2598 			goto bad_mps;
2599 		hs_ep->ep.maxpacket = mps_bytes;
2600 		hs_ep->mc = 1;
2601 	} else {
2602 		if (mps > 1024)
2603 			goto bad_mps;
2604 		hs_ep->mc = mc;
2605 		if (mc > 3)
2606 			goto bad_mps;
2607 		hs_ep->ep.maxpacket = mps;
2608 	}
2609 
2610 	if (dir_in) {
2611 		reg = dwc2_readl(hsotg, DIEPCTL(ep));
2612 		reg &= ~DXEPCTL_MPS_MASK;
2613 		reg |= mps;
2614 		dwc2_writel(hsotg, reg, DIEPCTL(ep));
2615 	} else {
2616 		reg = dwc2_readl(hsotg, DOEPCTL(ep));
2617 		reg &= ~DXEPCTL_MPS_MASK;
2618 		reg |= mps;
2619 		dwc2_writel(hsotg, reg, DOEPCTL(ep));
2620 	}
2621 
2622 	return;
2623 
2624 bad_mps:
2625 	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
2626 }
2627 
2628 /**
2629  * dwc2_hsotg_txfifo_flush - flush Tx FIFO
2630  * @hsotg: The driver state
2631  * @idx: The index for the endpoint (0..15)
2632  */
dwc2_hsotg_txfifo_flush(struct dwc2_hsotg * hsotg,unsigned int idx)2633 static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
2634 {
2635 	dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
2636 		    GRSTCTL);
2637 
2638 	/* wait until the fifo is flushed */
2639 	if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
2640 		dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
2641 			 __func__);
2642 }
2643 
2644 /**
2645  * dwc2_hsotg_trytx - check to see if anything needs transmitting
2646  * @hsotg: The driver state
2647  * @hs_ep: The driver endpoint to check.
2648  *
2649  * Check to see if there is a request that has data to send, and if so
2650  * make an attempt to write data into the FIFO.
2651  */
dwc2_hsotg_trytx(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep)2652 static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
2653 			    struct dwc2_hsotg_ep *hs_ep)
2654 {
2655 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2656 
2657 	if (!hs_ep->dir_in || !hs_req) {
2658 		/**
2659 		 * if request is not enqueued, we disable interrupts
2660 		 * for endpoints, excepting ep0
2661 		 */
2662 		if (hs_ep->index != 0)
2663 			dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
2664 					      hs_ep->dir_in, 0);
2665 		return 0;
2666 	}
2667 
2668 	if (hs_req->req.actual < hs_req->req.length) {
2669 		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
2670 			hs_ep->index);
2671 		return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
2672 	}
2673 
2674 	return 0;
2675 }
2676 
2677 /**
2678  * dwc2_hsotg_complete_in - complete IN transfer
2679  * @hsotg: The device state.
2680  * @hs_ep: The endpoint that has just completed.
2681  *
2682  * An IN transfer has been completed, update the transfer's state and then
2683  * call the relevant completion routines.
2684  */
dwc2_hsotg_complete_in(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep)2685 static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
2686 				   struct dwc2_hsotg_ep *hs_ep)
2687 {
2688 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2689 	u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
2690 	int size_left, size_done;
2691 
2692 	if (!hs_req) {
2693 		dev_dbg(hsotg->dev, "XferCompl but no req\n");
2694 		return;
2695 	}
2696 
2697 	/* Finish ZLP handling for IN EP0 transactions */
2698 	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
2699 		dev_dbg(hsotg->dev, "zlp packet sent\n");
2700 
2701 		/*
2702 		 * While send zlp for DWC2_EP0_STATUS_IN EP direction was
2703 		 * changed to IN. Change back to complete OUT transfer request
2704 		 */
2705 		hs_ep->dir_in = 0;
2706 
2707 		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2708 		if (hsotg->test_mode) {
2709 			int ret;
2710 
2711 			ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
2712 			if (ret < 0) {
2713 				dev_dbg(hsotg->dev, "Invalid Test #%d\n",
2714 					hsotg->test_mode);
2715 				dwc2_hsotg_stall_ep0(hsotg);
2716 				return;
2717 			}
2718 		}
2719 		dwc2_hsotg_enqueue_setup(hsotg);
2720 		return;
2721 	}
2722 
2723 	/*
2724 	 * Calculate the size of the transfer by checking how much is left
2725 	 * in the endpoint size register and then working it out from
2726 	 * the amount we loaded for the transfer.
2727 	 *
2728 	 * We do this even for DMA, as the transfer may have incremented
2729 	 * past the end of the buffer (DMA transfers are always 32bit
2730 	 * aligned).
2731 	 */
2732 	if (using_desc_dma(hsotg)) {
2733 		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2734 		if (size_left < 0)
2735 			dev_err(hsotg->dev, "error parsing DDMA results %d\n",
2736 				size_left);
2737 	} else {
2738 		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2739 	}
2740 
2741 	size_done = hs_ep->size_loaded - size_left;
2742 	size_done += hs_ep->last_load;
2743 
2744 	if (hs_req->req.actual != size_done)
2745 		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
2746 			__func__, hs_req->req.actual, size_done);
2747 
2748 	hs_req->req.actual = size_done;
2749 	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
2750 		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
2751 
2752 	if (!size_left && hs_req->req.actual < hs_req->req.length) {
2753 		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
2754 		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2755 		return;
2756 	}
2757 
2758 	/* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */
2759 	if (hs_ep->send_zlp) {
2760 		hs_ep->send_zlp = 0;
2761 		if (!using_desc_dma(hsotg)) {
2762 			dwc2_hsotg_program_zlp(hsotg, hs_ep);
2763 			/* transfer will be completed on next complete interrupt */
2764 			return;
2765 		}
2766 	}
2767 
2768 	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
2769 		/* Move to STATUS OUT */
2770 		dwc2_hsotg_ep0_zlp(hsotg, false);
2771 		return;
2772 	}
2773 
2774 	/* Set actual frame number for completed transfers */
2775 	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
2776 		hs_req->req.frame_number = hs_ep->target_frame;
2777 		dwc2_gadget_incr_frame_num(hs_ep);
2778 	}
2779 
2780 	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2781 }
2782 
2783 /**
2784  * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
2785  * @hsotg: The device state.
2786  * @idx: Index of ep.
2787  * @dir_in: Endpoint direction 1-in 0-out.
2788  *
2789  * Reads for endpoint with given index and direction, by masking
2790  * epint_reg with coresponding mask.
2791  */
dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg * hsotg,unsigned int idx,int dir_in)2792 static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
2793 					  unsigned int idx, int dir_in)
2794 {
2795 	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
2796 	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
2797 	u32 ints;
2798 	u32 mask;
2799 	u32 diepempmsk;
2800 
2801 	mask = dwc2_readl(hsotg, epmsk_reg);
2802 	diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
2803 	mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
2804 	mask |= DXEPINT_SETUP_RCVD;
2805 
2806 	ints = dwc2_readl(hsotg, epint_reg);
2807 	ints &= mask;
2808 	return ints;
2809 }
2810 
2811 /**
2812  * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
2813  * @hs_ep: The endpoint on which interrupt is asserted.
2814  *
2815  * This interrupt indicates that the endpoint has been disabled per the
2816  * application's request.
2817  *
2818  * For IN endpoints flushes txfifo, in case of BULK clears DCTL_CGNPINNAK,
2819  * in case of ISOC completes current request.
2820  *
2821  * For ISOC-OUT endpoints completes expired requests. If there is remaining
2822  * request starts it.
2823  */
dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep * hs_ep)2824 static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
2825 {
2826 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2827 	struct dwc2_hsotg_req *hs_req;
2828 	unsigned char idx = hs_ep->index;
2829 	int dir_in = hs_ep->dir_in;
2830 	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
2831 	int dctl = dwc2_readl(hsotg, DCTL);
2832 
2833 	dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
2834 
2835 	if (dir_in) {
2836 		int epctl = dwc2_readl(hsotg, epctl_reg);
2837 
2838 		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
2839 
2840 		if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
2841 			int dctl = dwc2_readl(hsotg, DCTL);
2842 
2843 			dctl |= DCTL_CGNPINNAK;
2844 			dwc2_writel(hsotg, dctl, DCTL);
2845 		}
2846 	} else {
2847 
2848 		if (dctl & DCTL_GOUTNAKSTS) {
2849 			dctl |= DCTL_CGOUTNAK;
2850 			dwc2_writel(hsotg, dctl, DCTL);
2851 		}
2852 	}
2853 
2854 	if (!hs_ep->isochronous)
2855 		return;
2856 
2857 	if (list_empty(&hs_ep->queue)) {
2858 		dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
2859 			__func__, hs_ep);
2860 		return;
2861 	}
2862 
2863 	do {
2864 		hs_req = get_ep_head(hs_ep);
2865 		if (hs_req) {
2866 			hs_req->req.frame_number = hs_ep->target_frame;
2867 			hs_req->req.actual = 0;
2868 			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2869 						    -ENODATA);
2870 		}
2871 		dwc2_gadget_incr_frame_num(hs_ep);
2872 		/* Update current frame number value. */
2873 		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
2874 	} while (dwc2_gadget_target_frame_elapsed(hs_ep));
2875 }
2876 
2877 /**
2878  * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
2879  * @ep: The endpoint on which interrupt is asserted.
2880  *
2881  * This is starting point for ISOC-OUT transfer, synchronization done with
2882  * first out token received from host while corresponding EP is disabled.
2883  *
2884  * Device does not know initial frame in which out token will come. For this
2885  * HW generates OUTTKNEPDIS - out token is received while EP is disabled. Upon
2886  * getting this interrupt SW starts calculation for next transfer frame.
2887  */
dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep * ep)2888 static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
2889 {
2890 	struct dwc2_hsotg *hsotg = ep->parent;
2891 	struct dwc2_hsotg_req *hs_req;
2892 	int dir_in = ep->dir_in;
2893 
2894 	if (dir_in || !ep->isochronous)
2895 		return;
2896 
2897 	if (using_desc_dma(hsotg)) {
2898 		if (ep->target_frame == TARGET_FRAME_INITIAL) {
2899 			/* Start first ISO Out */
2900 			ep->target_frame = hsotg->frame_number;
2901 			dwc2_gadget_start_isoc_ddma(ep);
2902 		}
2903 		return;
2904 	}
2905 
2906 	if (ep->target_frame == TARGET_FRAME_INITIAL) {
2907 		u32 ctrl;
2908 
2909 		ep->target_frame = hsotg->frame_number;
2910 		if (ep->interval > 1) {
2911 			ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
2912 			if (ep->target_frame & 0x1)
2913 				ctrl |= DXEPCTL_SETODDFR;
2914 			else
2915 				ctrl |= DXEPCTL_SETEVENFR;
2916 
2917 			dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
2918 		}
2919 	}
2920 
2921 	while (dwc2_gadget_target_frame_elapsed(ep)) {
2922 		hs_req = get_ep_head(ep);
2923 		if (hs_req) {
2924 			hs_req->req.frame_number = ep->target_frame;
2925 			hs_req->req.actual = 0;
2926 			dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
2927 		}
2928 
2929 		dwc2_gadget_incr_frame_num(ep);
2930 		/* Update current frame number value. */
2931 		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
2932 	}
2933 
2934 	if (!ep->req)
2935 		dwc2_gadget_start_next_request(ep);
2936 
2937 }
2938 
2939 static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
2940 				   struct dwc2_hsotg_ep *hs_ep);
2941 
2942 /**
2943  * dwc2_gadget_handle_nak - handle NAK interrupt
2944  * @hs_ep: The endpoint on which interrupt is asserted.
2945  *
2946  * This is starting point for ISOC-IN transfer, synchronization done with
2947  * first IN token received from host while corresponding EP is disabled.
2948  *
2949  * Device does not know when first one token will arrive from host. On first
2950  * token arrival HW generates 2 interrupts: 'in token received while FIFO empty'
2951  * and 'NAK'. NAK interrupt for ISOC-IN means that token has arrived and ZLP was
2952  * sent in response to that as there was no data in FIFO. SW is basing on this
2953  * interrupt to obtain frame in which token has come and then based on the
2954  * interval calculates next frame for transfer.
2955  */
dwc2_gadget_handle_nak(struct dwc2_hsotg_ep * hs_ep)2956 static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
2957 {
2958 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2959 	struct dwc2_hsotg_req *hs_req;
2960 	int dir_in = hs_ep->dir_in;
2961 	u32 ctrl;
2962 
2963 	if (!dir_in || !hs_ep->isochronous)
2964 		return;
2965 
2966 	if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
2967 
2968 		if (using_desc_dma(hsotg)) {
2969 			hs_ep->target_frame = hsotg->frame_number;
2970 			dwc2_gadget_incr_frame_num(hs_ep);
2971 
2972 			/* In service interval mode target_frame must
2973 			 * be set to last (u)frame of the service interval.
2974 			 */
2975 			if (hsotg->params.service_interval) {
2976 				/* Set target_frame to the first (u)frame of
2977 				 * the service interval
2978 				 */
2979 				hs_ep->target_frame &= ~hs_ep->interval + 1;
2980 
2981 				/* Set target_frame to the last (u)frame of
2982 				 * the service interval
2983 				 */
2984 				dwc2_gadget_incr_frame_num(hs_ep);
2985 				dwc2_gadget_dec_frame_num_by_one(hs_ep);
2986 			}
2987 
2988 			dwc2_gadget_start_isoc_ddma(hs_ep);
2989 			return;
2990 		}
2991 
2992 		hs_ep->target_frame = hsotg->frame_number;
2993 		if (hs_ep->interval > 1) {
2994 			u32 ctrl = dwc2_readl(hsotg,
2995 					      DIEPCTL(hs_ep->index));
2996 			if (hs_ep->target_frame & 0x1)
2997 				ctrl |= DXEPCTL_SETODDFR;
2998 			else
2999 				ctrl |= DXEPCTL_SETEVENFR;
3000 
3001 			dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
3002 		}
3003 	}
3004 
3005 	if (using_desc_dma(hsotg))
3006 		return;
3007 
3008 	ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
3009 	if (ctrl & DXEPCTL_EPENA)
3010 		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
3011 	else
3012 		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
3013 
3014 	while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
3015 		hs_req = get_ep_head(hs_ep);
3016 		if (hs_req) {
3017 			hs_req->req.frame_number = hs_ep->target_frame;
3018 			hs_req->req.actual = 0;
3019 			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
3020 		}
3021 
3022 		dwc2_gadget_incr_frame_num(hs_ep);
3023 		/* Update current frame number value. */
3024 		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
3025 	}
3026 
3027 	if (!hs_ep->req)
3028 		dwc2_gadget_start_next_request(hs_ep);
3029 }
3030 
3031 /**
3032  * dwc2_hsotg_epint - handle an in/out endpoint interrupt
3033  * @hsotg: The driver state
3034  * @idx: The index for the endpoint (0..15)
3035  * @dir_in: Set if this is an IN endpoint
3036  *
3037  * Process and clear any interrupt pending for an individual endpoint
3038  */
dwc2_hsotg_epint(struct dwc2_hsotg * hsotg,unsigned int idx,int dir_in)3039 static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
3040 			     int dir_in)
3041 {
3042 	struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
3043 	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
3044 	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
3045 	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
3046 	u32 ints;
3047 
3048 	ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
3049 
3050 	/* Clear endpoint interrupts */
3051 	dwc2_writel(hsotg, ints, epint_reg);
3052 
3053 	if (!hs_ep) {
3054 		dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
3055 			__func__, idx, dir_in ? "in" : "out");
3056 		return;
3057 	}
3058 
3059 	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
3060 		__func__, idx, dir_in ? "in" : "out", ints);
3061 
3062 	/* Don't process XferCompl interrupt if it is a setup packet */
3063 	if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
3064 		ints &= ~DXEPINT_XFERCOMPL;
3065 
3066 	/*
3067 	 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
3068 	 * stage and xfercomplete was generated without SETUP phase done
3069 	 * interrupt. SW should parse received setup packet only after host's
3070 	 * exit from setup phase of control transfer.
3071 	 */
3072 	if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
3073 	    hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
3074 		ints &= ~DXEPINT_XFERCOMPL;
3075 
3076 	if (ints & DXEPINT_XFERCOMPL) {
3077 		dev_dbg(hsotg->dev,
3078 			"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
3079 			__func__, dwc2_readl(hsotg, epctl_reg),
3080 			dwc2_readl(hsotg, epsiz_reg));
3081 
3082 		/* In DDMA handle isochronous requests separately */
3083 		if (using_desc_dma(hsotg) && hs_ep->isochronous) {
3084 			dwc2_gadget_complete_isoc_request_ddma(hs_ep);
3085 		} else if (dir_in) {
3086 			/*
3087 			 * We get OutDone from the FIFO, so we only
3088 			 * need to look at completing IN requests here
3089 			 * if operating slave mode
3090 			 */
3091 			if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
3092 				dwc2_hsotg_complete_in(hsotg, hs_ep);
3093 
3094 			if (idx == 0 && !hs_ep->req)
3095 				dwc2_hsotg_enqueue_setup(hsotg);
3096 		} else if (using_dma(hsotg)) {
3097 			/*
3098 			 * We're using DMA, we need to fire an OutDone here
3099 			 * as we ignore the RXFIFO.
3100 			 */
3101 			if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
3102 				dwc2_hsotg_handle_outdone(hsotg, idx);
3103 		}
3104 	}
3105 
3106 	if (ints & DXEPINT_EPDISBLD)
3107 		dwc2_gadget_handle_ep_disabled(hs_ep);
3108 
3109 	if (ints & DXEPINT_OUTTKNEPDIS)
3110 		dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
3111 
3112 	if (ints & DXEPINT_NAKINTRPT)
3113 		dwc2_gadget_handle_nak(hs_ep);
3114 
3115 	if (ints & DXEPINT_AHBERR)
3116 		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
3117 
3118 	if (ints & DXEPINT_SETUP) {  /* Setup or Timeout */
3119 		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);
3120 
3121 		if (using_dma(hsotg) && idx == 0) {
3122 			/*
3123 			 * this is the notification we've received a
3124 			 * setup packet. In non-DMA mode we'd get this
3125 			 * from the RXFIFO, instead we need to process
3126 			 * the setup here.
3127 			 */
3128 
3129 			if (dir_in)
3130 				WARN_ON_ONCE(1);
3131 			else
3132 				dwc2_hsotg_handle_outdone(hsotg, 0);
3133 		}
3134 	}
3135 
3136 	if (ints & DXEPINT_STSPHSERCVD) {
3137 		dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
3138 
3139 		/* Safety check EP0 state when STSPHSERCVD asserted */
3140 		if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
3141 			/* Move to STATUS IN for DDMA */
3142 			if (using_desc_dma(hsotg)) {
3143 				if (!hsotg->delayed_status)
3144 					dwc2_hsotg_ep0_zlp(hsotg, true);
3145 				else
3146 				/* In case of 3 stage Control Write with delayed
3147 				 * status, when Status IN transfer started
3148 				 * before STSPHSERCVD asserted, NAKSTS bit not
3149 				 * cleared by CNAK in dwc2_hsotg_start_req()
3150 				 * function. Clear now NAKSTS to allow complete
3151 				 * transfer.
3152 				 */
3153 					dwc2_set_bit(hsotg, DIEPCTL(0),
3154 						     DXEPCTL_CNAK);
3155 			}
3156 		}
3157 
3158 	}
3159 
3160 	if (ints & DXEPINT_BACK2BACKSETUP)
3161 		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
3162 
3163 	if (ints & DXEPINT_BNAINTR) {
3164 		dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
3165 		if (hs_ep->isochronous)
3166 			dwc2_gadget_handle_isoc_bna(hs_ep);
3167 	}
3168 
3169 	if (dir_in && !hs_ep->isochronous) {
3170 		/* not sure if this is important, but we'll clear it anyway */
3171 		if (ints & DXEPINT_INTKNTXFEMP) {
3172 			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
3173 				__func__, idx);
3174 		}
3175 
3176 		/* this probably means something bad is happening */
3177 		if (ints & DXEPINT_INTKNEPMIS) {
3178 			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
3179 				 __func__, idx);
3180 		}
3181 
3182 		/* FIFO has space or is empty (see GAHBCFG) */
3183 		if (hsotg->dedicated_fifos &&
3184 		    ints & DXEPINT_TXFEMP) {
3185 			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
3186 				__func__, idx);
3187 			if (!using_dma(hsotg))
3188 				dwc2_hsotg_trytx(hsotg, hs_ep);
3189 		}
3190 	}
3191 }
3192 
3193 /**
3194  * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
3195  * @hsotg: The device state.
3196  *
3197  * Handle updating the device settings after the enumeration phase has
3198  * been completed.
3199  */
dwc2_hsotg_irq_enumdone(struct dwc2_hsotg * hsotg)3200 static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
3201 {
3202 	u32 dsts = dwc2_readl(hsotg, DSTS);
3203 	int ep0_mps = 0, ep_mps = 8;
3204 
3205 	/*
3206 	 * This should signal the finish of the enumeration phase
3207 	 * of the USB handshaking, so we should now know what rate
3208 	 * we connected at.
3209 	 */
3210 
3211 	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
3212 
3213 	/*
3214 	 * note, since we're limited by the size of transfer on EP0, and
3215 	 * it seems IN transfers must be a even number of packets we do
3216 	 * not advertise a 64byte MPS on EP0.
3217 	 */
3218 
3219 	/* catch both EnumSpd_FS and EnumSpd_FS48 */
3220 	switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
3221 	case DSTS_ENUMSPD_FS:
3222 	case DSTS_ENUMSPD_FS48:
3223 		hsotg->gadget.speed = USB_SPEED_FULL;
3224 		ep0_mps = EP0_MPS_LIMIT;
3225 		ep_mps = 1023;
3226 		break;
3227 
3228 	case DSTS_ENUMSPD_HS:
3229 		hsotg->gadget.speed = USB_SPEED_HIGH;
3230 		ep0_mps = EP0_MPS_LIMIT;
3231 		ep_mps = 1024;
3232 		break;
3233 
3234 	case DSTS_ENUMSPD_LS:
3235 		hsotg->gadget.speed = USB_SPEED_LOW;
3236 		ep0_mps = 8;
3237 		ep_mps = 8;
3238 		/*
3239 		 * note, we don't actually support LS in this driver at the
3240 		 * moment, and the documentation seems to imply that it isn't
3241 		 * supported by the PHYs on some of the devices.
3242 		 */
3243 		break;
3244 	}
3245 	dev_info(hsotg->dev, "new device is %s\n",
3246 		 usb_speed_string(hsotg->gadget.speed));
3247 
3248 	/*
3249 	 * we should now know the maximum packet size for an
3250 	 * endpoint, so set the endpoints to a default value.
3251 	 */
3252 
3253 	if (ep0_mps) {
3254 		int i;
3255 		/* Initialize ep0 for both in and out directions */
3256 		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
3257 		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
3258 		for (i = 1; i < hsotg->num_of_eps; i++) {
3259 			if (hsotg->eps_in[i])
3260 				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3261 							    0, 1);
3262 			if (hsotg->eps_out[i])
3263 				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3264 							    0, 0);
3265 		}
3266 	}
3267 
3268 	/* ensure after enumeration our EP0 is active */
3269 
3270 	dwc2_hsotg_enqueue_setup(hsotg);
3271 
3272 	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3273 		dwc2_readl(hsotg, DIEPCTL0),
3274 		dwc2_readl(hsotg, DOEPCTL0));
3275 }
3276 
3277 /**
3278  * kill_all_requests - remove all requests from the endpoint's queue
3279  * @hsotg: The device state.
3280  * @ep: The endpoint the requests may be on.
3281  * @result: The result code to use.
3282  *
3283  * Go through the requests on the given endpoint and mark them
3284  * completed with the given result code.
3285  */
kill_all_requests(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * ep,int result)3286 static void kill_all_requests(struct dwc2_hsotg *hsotg,
3287 			      struct dwc2_hsotg_ep *ep,
3288 			      int result)
3289 {
3290 	unsigned int size;
3291 
3292 	ep->req = NULL;
3293 
3294 	while (!list_empty(&ep->queue)) {
3295 		struct dwc2_hsotg_req *req = get_ep_head(ep);
3296 
3297 		dwc2_hsotg_complete_request(hsotg, ep, req, result);
3298 	}
3299 
3300 	if (!hsotg->dedicated_fifos)
3301 		return;
3302 	size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
3303 	if (size < ep->fifo_size)
3304 		dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
3305 }
3306 
3307 /**
3308  * dwc2_hsotg_disconnect - disconnect service
3309  * @hsotg: The device state.
3310  *
3311  * The device has been disconnected. Remove all current
3312  * transactions and signal the gadget driver that this
3313  * has happened.
3314  */
dwc2_hsotg_disconnect(struct dwc2_hsotg * hsotg)3315 void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
3316 {
3317 	unsigned int ep;
3318 
3319 	if (!hsotg->connected)
3320 		return;
3321 
3322 	hsotg->connected = 0;
3323 	hsotg->test_mode = 0;
3324 
3325 	/* all endpoints should be shutdown */
3326 	for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3327 		if (hsotg->eps_in[ep])
3328 			kill_all_requests(hsotg, hsotg->eps_in[ep],
3329 					  -ESHUTDOWN);
3330 		if (hsotg->eps_out[ep])
3331 			kill_all_requests(hsotg, hsotg->eps_out[ep],
3332 					  -ESHUTDOWN);
3333 	}
3334 
3335 	call_gadget(hsotg, disconnect);
3336 	hsotg->lx_state = DWC2_L3;
3337 
3338 	usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
3339 }
3340 
3341 /**
3342  * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
3343  * @hsotg: The device state:
3344  * @periodic: True if this is a periodic FIFO interrupt
3345  */
dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg * hsotg,bool periodic)3346 static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
3347 {
3348 	struct dwc2_hsotg_ep *ep;
3349 	int epno, ret;
3350 
3351 	/* look through for any more data to transmit */
3352 	for (epno = 0; epno < hsotg->num_of_eps; epno++) {
3353 		ep = index_to_ep(hsotg, epno, 1);
3354 
3355 		if (!ep)
3356 			continue;
3357 
3358 		if (!ep->dir_in)
3359 			continue;
3360 
3361 		if ((periodic && !ep->periodic) ||
3362 		    (!periodic && ep->periodic))
3363 			continue;
3364 
3365 		ret = dwc2_hsotg_trytx(hsotg, ep);
3366 		if (ret < 0)
3367 			break;
3368 	}
3369 }
3370 
3371 /* IRQ flags which will trigger a retry around the IRQ loop */
3372 #define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
3373 			GINTSTS_PTXFEMP |  \
3374 			GINTSTS_RXFLVL)
3375 
3376 static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
3377 /**
3378  * dwc2_hsotg_core_init_disconnected - issue softreset to the core
3379  * @hsotg: The device state
3380  * @is_usb_reset: Usb resetting flag
3381  *
3382  * Issue a soft reset to the core, and await the core finishing it.
3383  */
dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg * hsotg,bool is_usb_reset)3384 void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3385 				       bool is_usb_reset)
3386 {
3387 	u32 intmsk;
3388 	u32 val;
3389 	u32 usbcfg;
3390 	u32 dcfg = 0;
3391 	int ep;
3392 
3393 	/* Kill any ep0 requests as controller will be reinitialized */
3394 	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
3395 
3396 	if (!is_usb_reset) {
3397 		if (dwc2_core_reset(hsotg, true))
3398 			return;
3399 	} else {
3400 		/* all endpoints should be shutdown */
3401 		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
3402 			if (hsotg->eps_in[ep])
3403 				dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
3404 			if (hsotg->eps_out[ep])
3405 				dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
3406 		}
3407 	}
3408 
3409 	/*
3410 	 * we must now enable ep0 ready for host detection and then
3411 	 * set configuration.
3412 	 */
3413 
3414 	/* keep other bits untouched (so e.g. forced modes are not lost) */
3415 	usbcfg = dwc2_readl(hsotg, GUSBCFG);
3416 	usbcfg &= ~GUSBCFG_TOUTCAL_MASK;
3417 	usbcfg |= GUSBCFG_TOUTCAL(7);
3418 
3419 	/* remove the HNP/SRP and set the PHY */
3420 	usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP);
3421         dwc2_writel(hsotg, usbcfg, GUSBCFG);
3422 
3423 	dwc2_phy_init(hsotg, true);
3424 
3425 	dwc2_hsotg_init_fifo(hsotg);
3426 
3427 	if (!is_usb_reset) {
3428 		dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
3429 		if (hsotg->params.eusb2_disc)
3430 			dwc2_set_bit(hsotg, GOTGCTL, GOTGCTL_EUSB2_DISC_SUPP);
3431 	}
3432 
3433 	dcfg |= DCFG_EPMISCNT(1);
3434 
3435 	switch (hsotg->params.speed) {
3436 	case DWC2_SPEED_PARAM_LOW:
3437 		dcfg |= DCFG_DEVSPD_LS;
3438 		break;
3439 	case DWC2_SPEED_PARAM_FULL:
3440 		if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
3441 			dcfg |= DCFG_DEVSPD_FS48;
3442 		else
3443 			dcfg |= DCFG_DEVSPD_FS;
3444 		break;
3445 	default:
3446 		dcfg |= DCFG_DEVSPD_HS;
3447 	}
3448 
3449 	if (hsotg->params.ipg_isoc_en)
3450 		dcfg |= DCFG_IPG_ISOC_SUPPORDED;
3451 
3452 	dwc2_writel(hsotg, dcfg,  DCFG);
3453 
3454 	/* Clear any pending OTG interrupts */
3455 	dwc2_writel(hsotg, 0xffffffff, GOTGINT);
3456 
3457 	/* Clear any pending interrupts */
3458 	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
3459 	intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
3460 		GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
3461 		GINTSTS_USBRST | GINTSTS_RESETDET |
3462 		GINTSTS_ENUMDONE | GINTSTS_OTGINT |
3463 		GINTSTS_USBSUSP | GINTSTS_WKUPINT |
3464 		GINTSTS_LPMTRANRCVD;
3465 
3466 	if (!using_desc_dma(hsotg))
3467 		intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
3468 
3469 	if (!hsotg->params.external_id_pin_ctl)
3470 		intmsk |= GINTSTS_CONIDSTSCHNG;
3471 
3472 	dwc2_writel(hsotg, intmsk, GINTMSK);
3473 
3474 	if (using_dma(hsotg)) {
3475 		dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
3476 			    hsotg->params.ahbcfg,
3477 			    GAHBCFG);
3478 
3479 		/* Set DDMA mode support in the core if needed */
3480 		if (using_desc_dma(hsotg))
3481 			dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN);
3482 
3483 	} else {
3484 		dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
3485 						(GAHBCFG_NP_TXF_EMP_LVL |
3486 						 GAHBCFG_P_TXF_EMP_LVL) : 0) |
3487 			    GAHBCFG_GLBL_INTR_EN, GAHBCFG);
3488 	}
3489 
3490 	/*
3491 	 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
3492 	 * when we have no data to transfer. Otherwise we get being flooded by
3493 	 * interrupts.
3494 	 */
3495 
3496 	dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
3497 		DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
3498 		DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
3499 		DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
3500 		DIEPMSK);
3501 
3502 	/*
3503 	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
3504 	 * DMA mode we may need this and StsPhseRcvd.
3505 	 */
3506 	dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
3507 		DOEPMSK_STSPHSERCVDMSK) : 0) |
3508 		DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
3509 		DOEPMSK_SETUPMSK,
3510 		DOEPMSK);
3511 
3512 	/* Enable BNA interrupt for DDMA */
3513 	if (using_desc_dma(hsotg)) {
3514 		dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK);
3515 		dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
3516 	}
3517 
3518 	/* Enable Service Interval mode if supported */
3519 	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
3520 		dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
3521 
3522 	dwc2_writel(hsotg, 0, DAINTMSK);
3523 
3524 	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3525 		dwc2_readl(hsotg, DIEPCTL0),
3526 		dwc2_readl(hsotg, DOEPCTL0));
3527 
3528 	/* enable in and out endpoint interrupts */
3529 	dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
3530 
3531 	/*
3532 	 * Enable the RXFIFO when in slave mode, as this is how we collect
3533 	 * the data. In DMA mode, we get events from the FIFO but also
3534 	 * things we cannot process, so do not use it.
3535 	 */
3536 	if (!using_dma(hsotg))
3537 		dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
3538 
3539 	/* Enable interrupts for EP0 in and out */
3540 	dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
3541 	dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
3542 
3543 	if (!is_usb_reset) {
3544 		dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
3545 		udelay(10);  /* see openiboot */
3546 		dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
3547 	}
3548 
3549 	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));
3550 
3551 	/*
3552 	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
3553 	 * writing to the EPCTL register..
3554 	 */
3555 
3556 	/* set to read 1 8byte packet */
3557 	dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
3558 	       DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0);
3559 
3560 	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3561 	       DXEPCTL_CNAK | DXEPCTL_EPENA |
3562 	       DXEPCTL_USBACTEP,
3563 	       DOEPCTL0);
3564 
3565 	/* enable, but don't activate EP0in */
3566 	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3567 	       DXEPCTL_USBACTEP, DIEPCTL0);
3568 
3569 	/* clear global NAKs */
3570 	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
3571 	if (!is_usb_reset)
3572 		val |= DCTL_SFTDISCON;
3573 	dwc2_set_bit(hsotg, DCTL, val);
3574 
3575 	/* configure the core to support LPM */
3576 	dwc2_gadget_init_lpm(hsotg);
3577 
3578 	/* program GREFCLK register if needed */
3579 	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
3580 		dwc2_gadget_program_ref_clk(hsotg);
3581 
3582 	/* must be at-least 3ms to allow bus to see disconnect */
3583 	mdelay(3);
3584 
3585 	hsotg->lx_state = DWC2_L0;
3586 
3587 	dwc2_hsotg_enqueue_setup(hsotg);
3588 
3589 	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3590 		dwc2_readl(hsotg, DIEPCTL0),
3591 		dwc2_readl(hsotg, DOEPCTL0));
3592 }
3593 
dwc2_hsotg_core_disconnect(struct dwc2_hsotg * hsotg)3594 void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
3595 {
3596 	/* set the soft-disconnect bit */
3597 	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
3598 }
3599 
dwc2_hsotg_core_connect(struct dwc2_hsotg * hsotg)3600 void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
3601 {
3602 	/* remove the soft-disconnect and let's go */
3603 	if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD))
3604 		dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
3605 }
3606 
3607 /**
3608  * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
3609  * @hsotg: The device state:
3610  *
3611  * This interrupt indicates one of the following conditions occurred while
3612  * transmitting an ISOC transaction.
3613  * - Corrupted IN Token for ISOC EP.
3614  * - Packet not complete in FIFO.
3615  *
3616  * The following actions will be taken:
3617  * - Determine the EP
3618  * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
3619  */
dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg * hsotg)3620 static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
3621 {
3622 	struct dwc2_hsotg_ep *hs_ep;
3623 	u32 epctrl;
3624 	u32 daintmsk;
3625 	u32 idx;
3626 
3627 	dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
3628 
3629 	daintmsk = dwc2_readl(hsotg, DAINTMSK);
3630 
3631 	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3632 		hs_ep = hsotg->eps_in[idx];
3633 		/* Proceed only unmasked ISOC EPs */
3634 		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3635 			continue;
3636 
3637 		epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
3638 		if ((epctrl & DXEPCTL_EPENA) &&
3639 		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
3640 			epctrl |= DXEPCTL_SNAK;
3641 			epctrl |= DXEPCTL_EPDIS;
3642 			dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
3643 		}
3644 	}
3645 
3646 	/* Clear interrupt */
3647 	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
3648 }
3649 
3650 /**
3651  * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
3652  * @hsotg: The device state:
3653  *
3654  * This interrupt indicates one of the following conditions occurred while
3655  * transmitting an ISOC transaction.
3656  * - Corrupted OUT Token for ISOC EP.
3657  * - Packet not complete in FIFO.
3658  *
3659  * The following actions will be taken:
3660  * - Determine the EP
3661  * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
3662  */
dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg * hsotg)3663 static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
3664 {
3665 	u32 gintsts;
3666 	u32 gintmsk;
3667 	u32 daintmsk;
3668 	u32 epctrl;
3669 	struct dwc2_hsotg_ep *hs_ep;
3670 	int idx;
3671 
3672 	dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
3673 
3674 	daintmsk = dwc2_readl(hsotg, DAINTMSK);
3675 	daintmsk >>= DAINT_OUTEP_SHIFT;
3676 
3677 	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3678 		hs_ep = hsotg->eps_out[idx];
3679 		/* Proceed only unmasked ISOC EPs */
3680 		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3681 			continue;
3682 
3683 		epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
3684 		if ((epctrl & DXEPCTL_EPENA) &&
3685 		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
3686 			/* Unmask GOUTNAKEFF interrupt */
3687 			gintmsk = dwc2_readl(hsotg, GINTMSK);
3688 			gintmsk |= GINTSTS_GOUTNAKEFF;
3689 			dwc2_writel(hsotg, gintmsk, GINTMSK);
3690 
3691 			gintsts = dwc2_readl(hsotg, GINTSTS);
3692 			if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
3693 				dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
3694 				break;
3695 			}
3696 		}
3697 	}
3698 
3699 	/* Clear interrupt */
3700 	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
3701 }
3702 
3703 /**
3704  * dwc2_hsotg_irq - handle device interrupt
3705  * @irq: The IRQ number triggered
3706  * @pw: The pw value when registered the handler.
3707  */
dwc2_hsotg_irq(int irq,void * pw)3708 static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
3709 {
3710 	struct dwc2_hsotg *hsotg = pw;
3711 	int retry_count = 8;
3712 	u32 gintsts;
3713 	u32 gintmsk;
3714 
3715 	if (!dwc2_is_device_mode(hsotg))
3716 		return IRQ_NONE;
3717 
3718 	spin_lock(&hsotg->lock);
3719 irq_retry:
3720 	gintsts = dwc2_readl(hsotg, GINTSTS);
3721 	gintmsk = dwc2_readl(hsotg, GINTMSK);
3722 
3723 	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
3724 		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
3725 
3726 	gintsts &= gintmsk;
3727 
3728 	if (gintsts & GINTSTS_RESETDET) {
3729 		dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);
3730 
3731 		dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS);
3732 
3733 		/* This event must be used only if controller is suspended */
3734 		if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
3735 			dwc2_exit_partial_power_down(hsotg, 0, true);
3736 
3737 		/* Exit gadget mode clock gating. */
3738 		if (hsotg->params.power_down ==
3739 		    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
3740 		    !hsotg->params.no_clock_gating)
3741 			dwc2_gadget_exit_clock_gating(hsotg, 0);
3742 
3743 		hsotg->lx_state = DWC2_L0;
3744 	}
3745 
3746 	if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
3747 		u32 usb_status = dwc2_readl(hsotg, GOTGCTL);
3748 		u32 connected = hsotg->connected;
3749 
3750 		dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
3751 		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
3752 			dwc2_readl(hsotg, GNPTXSTS));
3753 
3754 		dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS);
3755 
3756 		/* Report disconnection if it is not already done. */
3757 		dwc2_hsotg_disconnect(hsotg);
3758 
3759 		/* Reset device address to zero */
3760 		dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
3761 
3762 		if (usb_status & GOTGCTL_BSESVLD && connected)
3763 			dwc2_hsotg_core_init_disconnected(hsotg, true);
3764 	}
3765 
3766 	if (gintsts & GINTSTS_ENUMDONE) {
3767 		dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS);
3768 
3769 		dwc2_hsotg_irq_enumdone(hsotg);
3770 	}
3771 
3772 	if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
3773 		u32 daint = dwc2_readl(hsotg, DAINT);
3774 		u32 daintmsk = dwc2_readl(hsotg, DAINTMSK);
3775 		u32 daint_out, daint_in;
3776 		int ep;
3777 
3778 		daint &= daintmsk;
3779 		daint_out = daint >> DAINT_OUTEP_SHIFT;
3780 		daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);
3781 
3782 		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
3783 
3784 		for (ep = 0; ep < hsotg->num_of_eps && daint_out;
3785 						ep++, daint_out >>= 1) {
3786 			if (daint_out & 1)
3787 				dwc2_hsotg_epint(hsotg, ep, 0);
3788 		}
3789 
3790 		for (ep = 0; ep < hsotg->num_of_eps  && daint_in;
3791 						ep++, daint_in >>= 1) {
3792 			if (daint_in & 1)
3793 				dwc2_hsotg_epint(hsotg, ep, 1);
3794 		}
3795 	}
3796 
3797 	/* check both FIFOs */
3798 
3799 	if (gintsts & GINTSTS_NPTXFEMP) {
3800 		dev_dbg(hsotg->dev, "NPTxFEmp\n");
3801 
3802 		/*
3803 		 * Disable the interrupt to stop it happening again
3804 		 * unless one of these endpoint routines decides that
3805 		 * it needs re-enabling
3806 		 */
3807 
3808 		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
3809 		dwc2_hsotg_irq_fifoempty(hsotg, false);
3810 	}
3811 
3812 	if (gintsts & GINTSTS_PTXFEMP) {
3813 		dev_dbg(hsotg->dev, "PTxFEmp\n");
3814 
3815 		/* See note in GINTSTS_NPTxFEmp */
3816 
3817 		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
3818 		dwc2_hsotg_irq_fifoempty(hsotg, true);
3819 	}
3820 
3821 	if (gintsts & GINTSTS_RXFLVL) {
3822 		/*
3823 		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
3824 		 * we need to retry dwc2_hsotg_handle_rx if this is still
3825 		 * set.
3826 		 */
3827 
3828 		dwc2_hsotg_handle_rx(hsotg);
3829 	}
3830 
3831 	if (gintsts & GINTSTS_ERLYSUSP) {
3832 		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
3833 		dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS);
3834 	}
3835 
3836 	/*
3837 	 * these next two seem to crop-up occasionally causing the core
3838 	 * to shutdown the USB transfer, so try clearing them and logging
3839 	 * the occurrence.
3840 	 */
3841 
3842 	if (gintsts & GINTSTS_GOUTNAKEFF) {
3843 		u8 idx;
3844 		u32 epctrl;
3845 		u32 gintmsk;
3846 		u32 daintmsk;
3847 		struct dwc2_hsotg_ep *hs_ep;
3848 
3849 		daintmsk = dwc2_readl(hsotg, DAINTMSK);
3850 		daintmsk >>= DAINT_OUTEP_SHIFT;
3851 		/* Mask this interrupt */
3852 		gintmsk = dwc2_readl(hsotg, GINTMSK);
3853 		gintmsk &= ~GINTSTS_GOUTNAKEFF;
3854 		dwc2_writel(hsotg, gintmsk, GINTMSK);
3855 
3856 		dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
3857 		for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3858 			hs_ep = hsotg->eps_out[idx];
3859 			/* Proceed only unmasked ISOC EPs */
3860 			if (BIT(idx) & ~daintmsk)
3861 				continue;
3862 
3863 			epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
3864 
3865 			//ISOC Ep's only
3866 			if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
3867 				epctrl |= DXEPCTL_SNAK;
3868 				epctrl |= DXEPCTL_EPDIS;
3869 				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
3870 				continue;
3871 			}
3872 
3873 			//Non-ISOC EP's
3874 			if (hs_ep->halted) {
3875 				if (!(epctrl & DXEPCTL_EPENA))
3876 					epctrl |= DXEPCTL_EPENA;
3877 				epctrl |= DXEPCTL_EPDIS;
3878 				epctrl |= DXEPCTL_STALL;
3879 				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
3880 			}
3881 		}
3882 
3883 		/* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
3884 	}
3885 
3886 	if (gintsts & GINTSTS_GINNAKEFF) {
3887 		dev_info(hsotg->dev, "GINNakEff triggered\n");
3888 
3889 		dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
3890 
3891 		dwc2_hsotg_dump(hsotg);
3892 	}
3893 
3894 	if (gintsts & GINTSTS_INCOMPL_SOIN)
3895 		dwc2_gadget_handle_incomplete_isoc_in(hsotg);
3896 
3897 	if (gintsts & GINTSTS_INCOMPL_SOOUT)
3898 		dwc2_gadget_handle_incomplete_isoc_out(hsotg);
3899 
3900 	/*
3901 	 * if we've had fifo events, we should try and go around the
3902 	 * loop again to see if there's any point in returning yet.
3903 	 */
3904 
3905 	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
3906 		goto irq_retry;
3907 
3908 	/* Check WKUP_ALERT interrupt*/
3909 	if (hsotg->params.service_interval)
3910 		dwc2_gadget_wkup_alert_handler(hsotg);
3911 
3912 	spin_unlock(&hsotg->lock);
3913 
3914 	return IRQ_HANDLED;
3915 }
3916 
dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep)3917 static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
3918 				   struct dwc2_hsotg_ep *hs_ep)
3919 {
3920 	u32 epctrl_reg;
3921 	u32 epint_reg;
3922 
3923 	epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
3924 		DOEPCTL(hs_ep->index);
3925 	epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
3926 		DOEPINT(hs_ep->index);
3927 
3928 	dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
3929 		hs_ep->name);
3930 
3931 	if (hs_ep->dir_in) {
3932 		if (hsotg->dedicated_fifos || hs_ep->periodic) {
3933 			dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
3934 			/* Wait for Nak effect */
3935 			if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
3936 						    DXEPINT_INEPNAKEFF, 100))
3937 				dev_warn(hsotg->dev,
3938 					 "%s: timeout DIEPINT.NAKEFF\n",
3939 					 __func__);
3940 		} else {
3941 			dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
3942 			/* Wait for Nak effect */
3943 			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3944 						    GINTSTS_GINNAKEFF, 100))
3945 				dev_warn(hsotg->dev,
3946 					 "%s: timeout GINTSTS.GINNAKEFF\n",
3947 					 __func__);
3948 		}
3949 	} else {
3950 		/* Mask GINTSTS_GOUTNAKEFF interrupt */
3951 		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
3952 
3953 		if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
3954 			dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
3955 
3956 		if (!using_dma(hsotg)) {
3957 			/* Wait for GINTSTS_RXFLVL interrupt */
3958 			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3959 						    GINTSTS_RXFLVL, 100)) {
3960 				dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
3961 					 __func__);
3962 			} else {
3963 				/*
3964 				 * Pop GLOBAL OUT NAK status packet from RxFIFO
3965 				 * to assert GOUTNAKEFF interrupt
3966 				 */
3967 				dwc2_readl(hsotg, GRXSTSP);
3968 			}
3969 		}
3970 
3971 		/* Wait for global nak to take effect */
3972 		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3973 					    GINTSTS_GOUTNAKEFF, 100))
3974 			dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
3975 				 __func__);
3976 	}
3977 
3978 	/* Disable ep */
3979 	dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
3980 
3981 	/* Wait for ep to be disabled */
3982 	if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
3983 		dev_warn(hsotg->dev,
3984 			 "%s: timeout DOEPCTL.EPDisable\n", __func__);
3985 
3986 	/* Clear EPDISBLD interrupt */
3987 	dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);
3988 
3989 	if (hs_ep->dir_in) {
3990 		unsigned short fifo_index;
3991 
3992 		if (hsotg->dedicated_fifos || hs_ep->periodic)
3993 			fifo_index = hs_ep->fifo_index;
3994 		else
3995 			fifo_index = 0;
3996 
3997 		/* Flush TX FIFO */
3998 		dwc2_flush_tx_fifo(hsotg, fifo_index);
3999 
4000 		/* Clear Global In NP NAK in Shared FIFO for non periodic ep */
4001 		if (!hsotg->dedicated_fifos && !hs_ep->periodic)
4002 			dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
4003 
4004 	} else {
4005 		/* Remove global NAKs */
4006 		dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
4007 	}
4008 }
4009 
4010 /**
4011  * dwc2_hsotg_ep_enable - enable the given endpoint
4012  * @ep: The USB endpint to configure
4013  * @desc: The USB endpoint descriptor to configure with.
4014  *
4015  * This is called from the USB gadget code's usb_ep_enable().
4016  */
dwc2_hsotg_ep_enable(struct usb_ep * ep,const struct usb_endpoint_descriptor * desc)4017 static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
4018 				const struct usb_endpoint_descriptor *desc)
4019 {
4020 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4021 	struct dwc2_hsotg *hsotg = hs_ep->parent;
4022 	unsigned long flags;
4023 	unsigned int index = hs_ep->index;
4024 	u32 epctrl_reg;
4025 	u32 epctrl;
4026 	u32 mps;
4027 	u32 mc;
4028 	u32 mask;
4029 	unsigned int dir_in;
4030 	unsigned int i, val, size;
4031 	int ret = 0;
4032 	unsigned char ep_type;
4033 	int desc_num;
4034 
4035 	dev_dbg(hsotg->dev,
4036 		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
4037 		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
4038 		desc->wMaxPacketSize, desc->bInterval);
4039 
4040 	/* not to be called for EP0 */
4041 	if (index == 0) {
4042 		dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
4043 		return -EINVAL;
4044 	}
4045 
4046 	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
4047 	if (dir_in != hs_ep->dir_in) {
4048 		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
4049 		return -EINVAL;
4050 	}
4051 
4052 	ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
4053 	mps = usb_endpoint_maxp(desc);
4054 	mc = usb_endpoint_maxp_mult(desc);
4055 
4056 	/* ISOC IN in DDMA supported bInterval up to 10 */
4057 	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
4058 	    dir_in && desc->bInterval > 10) {
4059 		dev_err(hsotg->dev,
4060 			"%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__);
4061 		return -EINVAL;
4062 	}
4063 
4064 	/* High bandwidth ISOC OUT in DDMA not supported */
4065 	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
4066 	    !dir_in && mc > 1) {
4067 		dev_err(hsotg->dev,
4068 			"%s: ISOC OUT, DDMA: HB not supported!\n", __func__);
4069 		return -EINVAL;
4070 	}
4071 
4072 	/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
4073 
4074 	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
4075 	epctrl = dwc2_readl(hsotg, epctrl_reg);
4076 
4077 	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
4078 		__func__, epctrl, epctrl_reg);
4079 
4080 	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
4081 		desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
4082 	else
4083 		desc_num = MAX_DMA_DESC_NUM_GENERIC;
4084 
4085 	/* Allocate DMA descriptor chain for non-ctrl endpoints */
4086 	if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
4087 		hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
4088 			desc_num * sizeof(struct dwc2_dma_desc),
4089 			&hs_ep->desc_list_dma, GFP_ATOMIC);
4090 		if (!hs_ep->desc_list) {
4091 			ret = -ENOMEM;
4092 			goto error2;
4093 		}
4094 	}
4095 
4096 	spin_lock_irqsave(&hsotg->lock, flags);
4097 
4098 	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
4099 	epctrl |= DXEPCTL_MPS(mps);
4100 
4101 	/*
4102 	 * mark the endpoint as active, otherwise the core may ignore
4103 	 * transactions entirely for this endpoint
4104 	 */
4105 	epctrl |= DXEPCTL_USBACTEP;
4106 
4107 	/* update the endpoint state */
4108 	dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
4109 
4110 	/* default, set to non-periodic */
4111 	hs_ep->isochronous = 0;
4112 	hs_ep->periodic = 0;
4113 	hs_ep->halted = 0;
4114 	hs_ep->wedged = 0;
4115 	hs_ep->interval = desc->bInterval;
4116 
4117 	switch (ep_type) {
4118 	case USB_ENDPOINT_XFER_ISOC:
4119 		epctrl |= DXEPCTL_EPTYPE_ISO;
4120 		epctrl |= DXEPCTL_SETEVENFR;
4121 		hs_ep->isochronous = 1;
4122 		hs_ep->interval = 1 << (desc->bInterval - 1);
4123 		hs_ep->target_frame = TARGET_FRAME_INITIAL;
4124 		hs_ep->next_desc = 0;
4125 		hs_ep->compl_desc = 0;
4126 		if (dir_in) {
4127 			hs_ep->periodic = 1;
4128 			mask = dwc2_readl(hsotg, DIEPMSK);
4129 			mask |= DIEPMSK_NAKMSK;
4130 			dwc2_writel(hsotg, mask, DIEPMSK);
4131 		} else {
4132 			epctrl |= DXEPCTL_SNAK;
4133 			mask = dwc2_readl(hsotg, DOEPMSK);
4134 			mask |= DOEPMSK_OUTTKNEPDISMSK;
4135 			dwc2_writel(hsotg, mask, DOEPMSK);
4136 		}
4137 		break;
4138 
4139 	case USB_ENDPOINT_XFER_BULK:
4140 		epctrl |= DXEPCTL_EPTYPE_BULK;
4141 		break;
4142 
4143 	case USB_ENDPOINT_XFER_INT:
4144 		if (dir_in)
4145 			hs_ep->periodic = 1;
4146 
4147 		if (hsotg->gadget.speed == USB_SPEED_HIGH)
4148 			hs_ep->interval = 1 << (desc->bInterval - 1);
4149 
4150 		epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
4151 		break;
4152 
4153 	case USB_ENDPOINT_XFER_CONTROL:
4154 		epctrl |= DXEPCTL_EPTYPE_CONTROL;
4155 		break;
4156 	}
4157 
4158 	/*
4159 	 * if the hardware has dedicated fifos, we must give each IN EP
4160 	 * a unique tx-fifo even if it is non-periodic.
4161 	 */
4162 	if (dir_in && hsotg->dedicated_fifos) {
4163 		unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
4164 		u32 fifo_index = 0;
4165 		u32 fifo_size = UINT_MAX;
4166 
4167 		size = hs_ep->ep.maxpacket * hs_ep->mc;
4168 		for (i = 1; i <= fifo_count; ++i) {
4169 			if (hsotg->fifo_map & (1 << i))
4170 				continue;
4171 			val = dwc2_readl(hsotg, DPTXFSIZN(i));
4172 			val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
4173 			if (val < size)
4174 				continue;
4175 			/* Search for smallest acceptable fifo */
4176 			if (val < fifo_size) {
4177 				fifo_size = val;
4178 				fifo_index = i;
4179 			}
4180 		}
4181 		if (!fifo_index) {
4182 			dev_err(hsotg->dev,
4183 				"%s: No suitable fifo found\n", __func__);
4184 			ret = -ENOMEM;
4185 			goto error1;
4186 		}
4187 		epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT);
4188 		hsotg->fifo_map |= 1 << fifo_index;
4189 		epctrl |= DXEPCTL_TXFNUM(fifo_index);
4190 		hs_ep->fifo_index = fifo_index;
4191 		hs_ep->fifo_size = fifo_size;
4192 	}
4193 
4194 	/* for non control endpoints, set PID to D0 */
4195 	if (index && !hs_ep->isochronous)
4196 		epctrl |= DXEPCTL_SETD0PID;
4197 
4198 	/* WA for Full speed ISOC IN in DDMA mode.
4199 	 * By Clear NAK status of EP, core will send ZLP
4200 	 * to IN token and assert NAK interrupt relying
4201 	 * on TxFIFO status only
4202 	 */
4203 
4204 	if (hsotg->gadget.speed == USB_SPEED_FULL &&
4205 	    hs_ep->isochronous && dir_in) {
4206 		/* The WA applies only to core versions from 2.72a
4207 		 * to 4.00a (including both). Also for FS_IOT_1.00a
4208 		 * and HS_IOT_1.00a.
4209 		 */
4210 		u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);
4211 
4212 		if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
4213 		     gsnpsid <= DWC2_CORE_REV_4_00a) ||
4214 		     gsnpsid == DWC2_FS_IOT_REV_1_00a ||
4215 		     gsnpsid == DWC2_HS_IOT_REV_1_00a)
4216 			epctrl |= DXEPCTL_CNAK;
4217 	}
4218 
4219 	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
4220 		__func__, epctrl);
4221 
4222 	dwc2_writel(hsotg, epctrl, epctrl_reg);
4223 	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
4224 		__func__, dwc2_readl(hsotg, epctrl_reg));
4225 
4226 	/* enable the endpoint interrupt */
4227 	dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
4228 
4229 error1:
4230 	spin_unlock_irqrestore(&hsotg->lock, flags);
4231 
4232 error2:
4233 	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
4234 		dmam_free_coherent(hsotg->dev, desc_num *
4235 			sizeof(struct dwc2_dma_desc),
4236 			hs_ep->desc_list, hs_ep->desc_list_dma);
4237 		hs_ep->desc_list = NULL;
4238 	}
4239 
4240 	return ret;
4241 }
4242 
4243 /**
4244  * dwc2_hsotg_ep_disable - disable given endpoint
4245  * @ep: The endpoint to disable.
4246  */
dwc2_hsotg_ep_disable(struct usb_ep * ep)4247 static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
4248 {
4249 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4250 	struct dwc2_hsotg *hsotg = hs_ep->parent;
4251 	int dir_in = hs_ep->dir_in;
4252 	int index = hs_ep->index;
4253 	u32 epctrl_reg;
4254 	u32 ctrl;
4255 
4256 	dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
4257 
4258 	if (ep == &hsotg->eps_out[0]->ep) {
4259 		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
4260 		return -EINVAL;
4261 	}
4262 
4263 	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4264 		dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
4265 		return -EINVAL;
4266 	}
4267 
4268 	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
4269 
4270 	ctrl = dwc2_readl(hsotg, epctrl_reg);
4271 
4272 	if (ctrl & DXEPCTL_EPENA)
4273 		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
4274 
4275 	ctrl &= ~DXEPCTL_EPENA;
4276 	ctrl &= ~DXEPCTL_USBACTEP;
4277 	ctrl |= DXEPCTL_SNAK;
4278 
4279 	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
4280 	dwc2_writel(hsotg, ctrl, epctrl_reg);
4281 
4282 	/* disable endpoint interrupts */
4283 	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
4284 
4285 	/* terminate all requests with shutdown */
4286 	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
4287 
4288 	hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
4289 	hs_ep->fifo_index = 0;
4290 	hs_ep->fifo_size = 0;
4291 
4292 	return 0;
4293 }
4294 
dwc2_hsotg_ep_disable_lock(struct usb_ep * ep)4295 static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
4296 {
4297 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4298 	struct dwc2_hsotg *hsotg = hs_ep->parent;
4299 	unsigned long flags;
4300 	int ret;
4301 
4302 	spin_lock_irqsave(&hsotg->lock, flags);
4303 	ret = dwc2_hsotg_ep_disable(ep);
4304 	spin_unlock_irqrestore(&hsotg->lock, flags);
4305 	return ret;
4306 }
4307 
4308 /**
4309  * on_list - check request is on the given endpoint
4310  * @ep: The endpoint to check.
4311  * @test: The request to test if it is on the endpoint.
4312  */
on_list(struct dwc2_hsotg_ep * ep,struct dwc2_hsotg_req * test)4313 static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
4314 {
4315 	struct dwc2_hsotg_req *req, *treq;
4316 
4317 	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
4318 		if (req == test)
4319 			return true;
4320 	}
4321 
4322 	return false;
4323 }
4324 
4325 /**
4326  * dwc2_hsotg_ep_dequeue - dequeue given endpoint
4327  * @ep: The endpoint to dequeue.
4328  * @req: The request to be removed from a queue.
4329  */
dwc2_hsotg_ep_dequeue(struct usb_ep * ep,struct usb_request * req)4330 static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
4331 {
4332 	struct dwc2_hsotg_req *hs_req = our_req(req);
4333 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4334 	struct dwc2_hsotg *hs = hs_ep->parent;
4335 	unsigned long flags;
4336 
4337 	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
4338 
4339 	spin_lock_irqsave(&hs->lock, flags);
4340 
4341 	if (!on_list(hs_ep, hs_req)) {
4342 		spin_unlock_irqrestore(&hs->lock, flags);
4343 		return -EINVAL;
4344 	}
4345 
4346 	/* Dequeue already started request */
4347 	if (req == &hs_ep->req->req)
4348 		dwc2_hsotg_ep_stop_xfr(hs, hs_ep);
4349 
4350 	dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
4351 	spin_unlock_irqrestore(&hs->lock, flags);
4352 
4353 	return 0;
4354 }
4355 
4356 /**
4357  * dwc2_gadget_ep_set_wedge - set wedge on a given endpoint
4358  * @ep: The endpoint to be wedged.
4359  *
4360  */
dwc2_gadget_ep_set_wedge(struct usb_ep * ep)4361 static int dwc2_gadget_ep_set_wedge(struct usb_ep *ep)
4362 {
4363 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4364 	struct dwc2_hsotg *hs = hs_ep->parent;
4365 
4366 	unsigned long	flags;
4367 	int		ret;
4368 
4369 	spin_lock_irqsave(&hs->lock, flags);
4370 	hs_ep->wedged = 1;
4371 	ret = dwc2_hsotg_ep_sethalt(ep, 1, false);
4372 	spin_unlock_irqrestore(&hs->lock, flags);
4373 
4374 	return ret;
4375 }
4376 
4377 /**
4378  * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
4379  * @ep: The endpoint to set halt.
4380  * @value: Set or unset the halt.
4381  * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
4382  *       the endpoint is busy processing requests.
4383  *
4384  * We need to stall the endpoint immediately if request comes from set_feature
4385  * protocol command handler.
4386  */
dwc2_hsotg_ep_sethalt(struct usb_ep * ep,int value,bool now)4387 static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
4388 {
4389 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4390 	struct dwc2_hsotg *hs = hs_ep->parent;
4391 	int index = hs_ep->index;
4392 	u32 epreg;
4393 	u32 epctl;
4394 	u32 xfertype;
4395 
4396 	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
4397 
4398 	if (index == 0) {
4399 		if (value)
4400 			dwc2_hsotg_stall_ep0(hs);
4401 		else
4402 			dev_warn(hs->dev,
4403 				 "%s: can't clear halt on ep0\n", __func__);
4404 		return 0;
4405 	}
4406 
4407 	if (hs_ep->isochronous) {
4408 		dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
4409 		return -EINVAL;
4410 	}
4411 
4412 	if (!now && value && !list_empty(&hs_ep->queue)) {
4413 		dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
4414 			ep->name);
4415 		return -EAGAIN;
4416 	}
4417 
4418 	if (hs_ep->dir_in) {
4419 		epreg = DIEPCTL(index);
4420 		epctl = dwc2_readl(hs, epreg);
4421 
4422 		if (value) {
4423 			epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
4424 			if (epctl & DXEPCTL_EPENA)
4425 				epctl |= DXEPCTL_EPDIS;
4426 		} else {
4427 			epctl &= ~DXEPCTL_STALL;
4428 			hs_ep->wedged = 0;
4429 			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4430 			if (xfertype == DXEPCTL_EPTYPE_BULK ||
4431 			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4432 				epctl |= DXEPCTL_SETD0PID;
4433 		}
4434 		dwc2_writel(hs, epctl, epreg);
4435 	} else {
4436 		epreg = DOEPCTL(index);
4437 		epctl = dwc2_readl(hs, epreg);
4438 
4439 		if (value) {
4440 			/* Unmask GOUTNAKEFF interrupt */
4441 			dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
4442 
4443 			if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
4444 				dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
4445 			// STALL bit will be set in GOUTNAKEFF interrupt handler
4446 		} else {
4447 			epctl &= ~DXEPCTL_STALL;
4448 			hs_ep->wedged = 0;
4449 			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4450 			if (xfertype == DXEPCTL_EPTYPE_BULK ||
4451 			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4452 				epctl |= DXEPCTL_SETD0PID;
4453 			dwc2_writel(hs, epctl, epreg);
4454 		}
4455 	}
4456 
4457 	hs_ep->halted = value;
4458 	return 0;
4459 }
4460 
4461 /**
4462  * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
4463  * @ep: The endpoint to set halt.
4464  * @value: Set or unset the halt.
4465  */
dwc2_hsotg_ep_sethalt_lock(struct usb_ep * ep,int value)4466 static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
4467 {
4468 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4469 	struct dwc2_hsotg *hs = hs_ep->parent;
4470 	unsigned long flags;
4471 	int ret;
4472 
4473 	spin_lock_irqsave(&hs->lock, flags);
4474 	ret = dwc2_hsotg_ep_sethalt(ep, value, false);
4475 	spin_unlock_irqrestore(&hs->lock, flags);
4476 
4477 	return ret;
4478 }
4479 
4480 static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
4481 	.enable		= dwc2_hsotg_ep_enable,
4482 	.disable	= dwc2_hsotg_ep_disable_lock,
4483 	.alloc_request	= dwc2_hsotg_ep_alloc_request,
4484 	.free_request	= dwc2_hsotg_ep_free_request,
4485 	.queue		= dwc2_hsotg_ep_queue_lock,
4486 	.dequeue	= dwc2_hsotg_ep_dequeue,
4487 	.set_halt	= dwc2_hsotg_ep_sethalt_lock,
4488 	.set_wedge	= dwc2_gadget_ep_set_wedge,
4489 	/* note, don't believe we have any call for the fifo routines */
4490 };
4491 
4492 /**
4493  * dwc2_hsotg_init - initialize the usb core
4494  * @hsotg: The driver state
4495  */
dwc2_hsotg_init(struct dwc2_hsotg * hsotg)4496 static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
4497 {
4498 	/* unmask subset of endpoint interrupts */
4499 
4500 	dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
4501 		    DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
4502 		    DIEPMSK);
4503 
4504 	dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
4505 		    DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
4506 		    DOEPMSK);
4507 
4508 	dwc2_writel(hsotg, 0, DAINTMSK);
4509 
4510 	/* Be in disconnected state until gadget is registered */
4511 	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
4512 
4513 	/* setup fifos */
4514 
4515 	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
4516 		dwc2_readl(hsotg, GRXFSIZ),
4517 		dwc2_readl(hsotg, GNPTXFSIZ));
4518 
4519 	dwc2_hsotg_init_fifo(hsotg);
4520 
4521 	if (using_dma(hsotg))
4522 		dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
4523 }
4524 
4525 /**
4526  * dwc2_hsotg_udc_start - prepare the udc for work
4527  * @gadget: The usb gadget state
4528  * @driver: The usb gadget driver
4529  *
4530  * Perform initialization to prepare udc device and driver
4531  * to work.
4532  */
dwc2_hsotg_udc_start(struct usb_gadget * gadget,struct usb_gadget_driver * driver)4533 static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
4534 				struct usb_gadget_driver *driver)
4535 {
4536 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4537 	unsigned long flags;
4538 	int ret;
4539 
4540 	if (!hsotg) {
4541 		pr_err("%s: called with no device\n", __func__);
4542 		return -ENODEV;
4543 	}
4544 
4545 	if (!driver) {
4546 		dev_err(hsotg->dev, "%s: no driver\n", __func__);
4547 		return -EINVAL;
4548 	}
4549 
4550 	if (driver->max_speed < USB_SPEED_FULL)
4551 		dev_err(hsotg->dev, "%s: bad speed\n", __func__);
4552 
4553 	if (!driver->setup) {
4554 		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
4555 		return -EINVAL;
4556 	}
4557 
4558 	WARN_ON(hsotg->driver);
4559 
4560 	hsotg->driver = driver;
4561 	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
4562 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4563 
4564 	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
4565 		ret = dwc2_lowlevel_hw_enable(hsotg);
4566 		if (ret)
4567 			goto err;
4568 	}
4569 
4570 	if (!IS_ERR_OR_NULL(hsotg->uphy))
4571 		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
4572 
4573 	spin_lock_irqsave(&hsotg->lock, flags);
4574 	if (dwc2_hw_is_device(hsotg)) {
4575 		dwc2_hsotg_init(hsotg);
4576 		dwc2_hsotg_core_init_disconnected(hsotg, false);
4577 	}
4578 
4579 	hsotg->enabled = 0;
4580 	spin_unlock_irqrestore(&hsotg->lock, flags);
4581 
4582 	gadget->sg_supported = using_desc_dma(hsotg);
4583 	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
4584 
4585 	return 0;
4586 
4587 err:
4588 	hsotg->driver = NULL;
4589 	return ret;
4590 }
4591 
4592 /**
4593  * dwc2_hsotg_udc_stop - stop the udc
4594  * @gadget: The usb gadget state
4595  *
4596  * Stop udc hw block and stay tunned for future transmissions
4597  */
dwc2_hsotg_udc_stop(struct usb_gadget * gadget)4598 static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
4599 {
4600 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4601 	unsigned long flags;
4602 	int ep;
4603 
4604 	if (!hsotg)
4605 		return -ENODEV;
4606 
4607 	/* all endpoints should be shutdown */
4608 	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
4609 		if (hsotg->eps_in[ep])
4610 			dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
4611 		if (hsotg->eps_out[ep])
4612 			dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
4613 	}
4614 
4615 	spin_lock_irqsave(&hsotg->lock, flags);
4616 
4617 	hsotg->driver = NULL;
4618 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4619 	hsotg->enabled = 0;
4620 
4621 	spin_unlock_irqrestore(&hsotg->lock, flags);
4622 
4623 	if (!IS_ERR_OR_NULL(hsotg->uphy))
4624 		otg_set_peripheral(hsotg->uphy->otg, NULL);
4625 
4626 	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
4627 		dwc2_lowlevel_hw_disable(hsotg);
4628 
4629 	return 0;
4630 }
4631 
4632 /**
4633  * dwc2_hsotg_gadget_getframe - read the frame number
4634  * @gadget: The usb gadget state
4635  *
4636  * Read the {micro} frame number
4637  */
dwc2_hsotg_gadget_getframe(struct usb_gadget * gadget)4638 static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
4639 {
4640 	return dwc2_hsotg_read_frameno(to_hsotg(gadget));
4641 }
4642 
4643 /**
4644  * dwc2_hsotg_set_selfpowered - set if device is self/bus powered
4645  * @gadget: The usb gadget state
4646  * @is_selfpowered: Whether the device is self-powered
4647  *
4648  * Set if the device is self or bus powered.
4649  */
dwc2_hsotg_set_selfpowered(struct usb_gadget * gadget,int is_selfpowered)4650 static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget,
4651 				      int is_selfpowered)
4652 {
4653 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4654 	unsigned long flags;
4655 
4656 	spin_lock_irqsave(&hsotg->lock, flags);
4657 	gadget->is_selfpowered = !!is_selfpowered;
4658 	spin_unlock_irqrestore(&hsotg->lock, flags);
4659 
4660 	return 0;
4661 }
4662 
4663 /**
4664  * dwc2_hsotg_pullup - connect/disconnect the USB PHY
4665  * @gadget: The usb gadget state
4666  * @is_on: Current state of the USB PHY
4667  *
4668  * Connect/Disconnect the USB PHY pullup
4669  */
dwc2_hsotg_pullup(struct usb_gadget * gadget,int is_on)4670 static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
4671 {
4672 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4673 	unsigned long flags;
4674 
4675 	dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
4676 		hsotg->op_state);
4677 
4678 	/* Don't modify pullup state while in host mode */
4679 	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4680 		hsotg->enabled = is_on;
4681 		return 0;
4682 	}
4683 
4684 	spin_lock_irqsave(&hsotg->lock, flags);
4685 	if (is_on) {
4686 		hsotg->enabled = 1;
4687 		dwc2_hsotg_core_init_disconnected(hsotg, false);
4688 		/* Enable ACG feature in device mode,if supported */
4689 		dwc2_enable_acg(hsotg);
4690 		dwc2_hsotg_core_connect(hsotg);
4691 	} else {
4692 		dwc2_hsotg_core_disconnect(hsotg);
4693 		dwc2_hsotg_disconnect(hsotg);
4694 		hsotg->enabled = 0;
4695 	}
4696 
4697 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4698 	spin_unlock_irqrestore(&hsotg->lock, flags);
4699 
4700 	return 0;
4701 }
4702 
dwc2_hsotg_vbus_session(struct usb_gadget * gadget,int is_active)4703 static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
4704 {
4705 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4706 	unsigned long flags;
4707 
4708 	dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
4709 	spin_lock_irqsave(&hsotg->lock, flags);
4710 
4711 	/*
4712 	 * If controller is in partial power down state, it must exit from
4713 	 * that state before being initialized / de-initialized
4714 	 */
4715 	if (hsotg->lx_state == DWC2_L2 && hsotg->in_ppd)
4716 		/*
4717 		 * No need to check the return value as
4718 		 * registers are not being restored.
4719 		 */
4720 		dwc2_exit_partial_power_down(hsotg, 0, false);
4721 
4722 	if (is_active) {
4723 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
4724 
4725 		dwc2_hsotg_core_init_disconnected(hsotg, false);
4726 		if (hsotg->enabled) {
4727 			/* Enable ACG feature in device mode,if supported */
4728 			dwc2_enable_acg(hsotg);
4729 			dwc2_hsotg_core_connect(hsotg);
4730 		}
4731 	} else {
4732 		dwc2_hsotg_core_disconnect(hsotg);
4733 		dwc2_hsotg_disconnect(hsotg);
4734 	}
4735 
4736 	spin_unlock_irqrestore(&hsotg->lock, flags);
4737 	return 0;
4738 }
4739 
4740 /**
4741  * dwc2_hsotg_vbus_draw - report bMaxPower field
4742  * @gadget: The usb gadget state
4743  * @mA: Amount of current
4744  *
4745  * Report how much power the device may consume to the phy.
4746  */
dwc2_hsotg_vbus_draw(struct usb_gadget * gadget,unsigned int mA)4747 static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
4748 {
4749 	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4750 
4751 	if (IS_ERR_OR_NULL(hsotg->uphy))
4752 		return -ENOTSUPP;
4753 	return usb_phy_set_power(hsotg->uphy, mA);
4754 }
4755 
dwc2_gadget_set_speed(struct usb_gadget * g,enum usb_device_speed speed)4756 static void dwc2_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
4757 {
4758 	struct dwc2_hsotg *hsotg = to_hsotg(g);
4759 	unsigned long		flags;
4760 
4761 	spin_lock_irqsave(&hsotg->lock, flags);
4762 	switch (speed) {
4763 	case USB_SPEED_HIGH:
4764 		hsotg->params.speed = DWC2_SPEED_PARAM_HIGH;
4765 		break;
4766 	case USB_SPEED_FULL:
4767 		hsotg->params.speed = DWC2_SPEED_PARAM_FULL;
4768 		break;
4769 	case USB_SPEED_LOW:
4770 		hsotg->params.speed = DWC2_SPEED_PARAM_LOW;
4771 		break;
4772 	default:
4773 		dev_err(hsotg->dev, "invalid speed (%d)\n", speed);
4774 	}
4775 	spin_unlock_irqrestore(&hsotg->lock, flags);
4776 }
4777 
4778 static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
4779 	.get_frame	= dwc2_hsotg_gadget_getframe,
4780 	.set_selfpowered	= dwc2_hsotg_set_selfpowered,
4781 	.udc_start		= dwc2_hsotg_udc_start,
4782 	.udc_stop		= dwc2_hsotg_udc_stop,
4783 	.pullup                 = dwc2_hsotg_pullup,
4784 	.udc_set_speed		= dwc2_gadget_set_speed,
4785 	.vbus_session		= dwc2_hsotg_vbus_session,
4786 	.vbus_draw		= dwc2_hsotg_vbus_draw,
4787 };
4788 
4789 /**
4790  * dwc2_hsotg_initep - initialise a single endpoint
4791  * @hsotg: The device state.
4792  * @hs_ep: The endpoint to be initialised.
4793  * @epnum: The endpoint number
4794  * @dir_in: True if direction is in.
4795  *
4796  * Initialise the given endpoint (as part of the probe and device state
4797  * creation) to give to the gadget driver. Setup the endpoint name, any
4798  * direction information and other state that may be required.
4799  */
dwc2_hsotg_initep(struct dwc2_hsotg * hsotg,struct dwc2_hsotg_ep * hs_ep,int epnum,bool dir_in)4800 static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
4801 			      struct dwc2_hsotg_ep *hs_ep,
4802 				       int epnum,
4803 				       bool dir_in)
4804 {
4805 	char *dir;
4806 
4807 	if (epnum == 0)
4808 		dir = "";
4809 	else if (dir_in)
4810 		dir = "in";
4811 	else
4812 		dir = "out";
4813 
4814 	hs_ep->dir_in = dir_in;
4815 	hs_ep->index = epnum;
4816 
4817 	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
4818 
4819 	INIT_LIST_HEAD(&hs_ep->queue);
4820 	INIT_LIST_HEAD(&hs_ep->ep.ep_list);
4821 
4822 	/* add to the list of endpoints known by the gadget driver */
4823 	if (epnum)
4824 		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
4825 
4826 	hs_ep->parent = hsotg;
4827 	hs_ep->ep.name = hs_ep->name;
4828 
4829 	if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
4830 		usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
4831 	else
4832 		usb_ep_set_maxpacket_limit(&hs_ep->ep,
4833 					   epnum ? 1024 : EP0_MPS_LIMIT);
4834 	hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
4835 
4836 	if (epnum == 0) {
4837 		hs_ep->ep.caps.type_control = true;
4838 	} else {
4839 		if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
4840 			hs_ep->ep.caps.type_iso = true;
4841 			hs_ep->ep.caps.type_bulk = true;
4842 		}
4843 		hs_ep->ep.caps.type_int = true;
4844 	}
4845 
4846 	if (dir_in)
4847 		hs_ep->ep.caps.dir_in = true;
4848 	else
4849 		hs_ep->ep.caps.dir_out = true;
4850 
4851 	/*
4852 	 * if we're using dma, we need to set the next-endpoint pointer
4853 	 * to be something valid.
4854 	 */
4855 
4856 	if (using_dma(hsotg)) {
4857 		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
4858 
4859 		if (dir_in)
4860 			dwc2_writel(hsotg, next, DIEPCTL(epnum));
4861 		else
4862 			dwc2_writel(hsotg, next, DOEPCTL(epnum));
4863 	}
4864 }
4865 
4866 /**
4867  * dwc2_hsotg_hw_cfg - read HW configuration registers
4868  * @hsotg: Programming view of the DWC_otg controller
4869  *
4870  * Read the USB core HW configuration registers
4871  */
dwc2_hsotg_hw_cfg(struct dwc2_hsotg * hsotg)4872 static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
4873 {
4874 	u32 cfg;
4875 	u32 ep_type;
4876 	u32 i;
4877 
4878 	/* check hardware configuration */
4879 
4880 	hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;
4881 
4882 	/* Add ep0 */
4883 	hsotg->num_of_eps++;
4884 
4885 	hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
4886 					sizeof(struct dwc2_hsotg_ep),
4887 					GFP_KERNEL);
4888 	if (!hsotg->eps_in[0])
4889 		return -ENOMEM;
4890 	/* Same dwc2_hsotg_ep is used in both directions for ep0 */
4891 	hsotg->eps_out[0] = hsotg->eps_in[0];
4892 
4893 	cfg = hsotg->hw_params.dev_ep_dirs;
4894 	for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
4895 		ep_type = cfg & 3;
4896 		/* Direction in or both */
4897 		if (!(ep_type & 2)) {
4898 			hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
4899 				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
4900 			if (!hsotg->eps_in[i])
4901 				return -ENOMEM;
4902 		}
4903 		/* Direction out or both */
4904 		if (!(ep_type & 1)) {
4905 			hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
4906 				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
4907 			if (!hsotg->eps_out[i])
4908 				return -ENOMEM;
4909 		}
4910 	}
4911 
4912 	hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
4913 	hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;
4914 
4915 	dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
4916 		 hsotg->num_of_eps,
4917 		 hsotg->dedicated_fifos ? "dedicated" : "shared",
4918 		 hsotg->fifo_mem);
4919 	return 0;
4920 }
4921 
4922 /**
4923  * dwc2_hsotg_dump - dump state of the udc
4924  * @hsotg: Programming view of the DWC_otg controller
4925  *
4926  */
dwc2_hsotg_dump(struct dwc2_hsotg * hsotg)4927 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
4928 {
4929 #ifdef DEBUG
4930 	struct device *dev = hsotg->dev;
4931 	u32 val;
4932 	int idx;
4933 
4934 	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
4935 		 dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL),
4936 		 dwc2_readl(hsotg, DIEPMSK));
4937 
4938 	dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
4939 		 dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1));
4940 
4941 	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
4942 		 dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ));
4943 
4944 	/* show periodic fifo settings */
4945 
4946 	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
4947 		val = dwc2_readl(hsotg, DPTXFSIZN(idx));
4948 		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
4949 			 val >> FIFOSIZE_DEPTH_SHIFT,
4950 			 val & FIFOSIZE_STARTADDR_MASK);
4951 	}
4952 
4953 	for (idx = 0; idx < hsotg->num_of_eps; idx++) {
4954 		dev_info(dev,
4955 			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
4956 			 dwc2_readl(hsotg, DIEPCTL(idx)),
4957 			 dwc2_readl(hsotg, DIEPTSIZ(idx)),
4958 			 dwc2_readl(hsotg, DIEPDMA(idx)));
4959 
4960 		val = dwc2_readl(hsotg, DOEPCTL(idx));
4961 		dev_info(dev,
4962 			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
4963 			 idx, dwc2_readl(hsotg, DOEPCTL(idx)),
4964 			 dwc2_readl(hsotg, DOEPTSIZ(idx)),
4965 			 dwc2_readl(hsotg, DOEPDMA(idx)));
4966 	}
4967 
4968 	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
4969 		 dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE));
4970 #endif
4971 }
4972 
4973 /**
4974  * dwc2_gadget_init - init function for gadget
4975  * @hsotg: Programming view of the DWC_otg controller
4976  *
4977  */
dwc2_gadget_init(struct dwc2_hsotg * hsotg)4978 int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
4979 {
4980 	struct device *dev = hsotg->dev;
4981 	int epnum;
4982 	int ret;
4983 
4984 	/* Dump fifo information */
4985 	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
4986 		hsotg->params.g_np_tx_fifo_size);
4987 	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
4988 
4989 	switch (hsotg->params.speed) {
4990 	case DWC2_SPEED_PARAM_LOW:
4991 		hsotg->gadget.max_speed = USB_SPEED_LOW;
4992 		break;
4993 	case DWC2_SPEED_PARAM_FULL:
4994 		hsotg->gadget.max_speed = USB_SPEED_FULL;
4995 		break;
4996 	default:
4997 		hsotg->gadget.max_speed = USB_SPEED_HIGH;
4998 		break;
4999 	}
5000 
5001 	hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
5002 	hsotg->gadget.name = dev_name(dev);
5003 	hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
5004 	hsotg->remote_wakeup_allowed = 0;
5005 
5006 	if (hsotg->params.lpm)
5007 		hsotg->gadget.lpm_capable = true;
5008 
5009 	if (hsotg->dr_mode == USB_DR_MODE_OTG)
5010 		hsotg->gadget.is_otg = 1;
5011 	else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
5012 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
5013 
5014 	ret = dwc2_hsotg_hw_cfg(hsotg);
5015 	if (ret) {
5016 		dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
5017 		return ret;
5018 	}
5019 
5020 	hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
5021 			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
5022 	if (!hsotg->ctrl_buff)
5023 		return -ENOMEM;
5024 
5025 	hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
5026 			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
5027 	if (!hsotg->ep0_buff)
5028 		return -ENOMEM;
5029 
5030 	if (using_desc_dma(hsotg)) {
5031 		ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
5032 		if (ret < 0)
5033 			return ret;
5034 	}
5035 
5036 	ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
5037 			       IRQF_SHARED, dev_name(hsotg->dev), hsotg);
5038 	if (ret < 0) {
5039 		dev_err(dev, "cannot claim IRQ for gadget\n");
5040 		return ret;
5041 	}
5042 
5043 	/* hsotg->num_of_eps holds number of EPs other than ep0 */
5044 
5045 	if (hsotg->num_of_eps == 0) {
5046 		dev_err(dev, "wrong number of EPs (zero)\n");
5047 		return -EINVAL;
5048 	}
5049 
5050 	/* setup endpoint information */
5051 
5052 	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
5053 	hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
5054 
5055 	/* allocate EP0 request */
5056 
5057 	hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
5058 						     GFP_KERNEL);
5059 	if (!hsotg->ctrl_req) {
5060 		dev_err(dev, "failed to allocate ctrl req\n");
5061 		return -ENOMEM;
5062 	}
5063 
5064 	/* initialise the endpoints now the core has been initialised */
5065 	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
5066 		if (hsotg->eps_in[epnum])
5067 			dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
5068 					  epnum, 1);
5069 		if (hsotg->eps_out[epnum])
5070 			dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
5071 					  epnum, 0);
5072 	}
5073 
5074 	dwc2_hsotg_dump(hsotg);
5075 
5076 	return 0;
5077 }
5078 
5079 /**
5080  * dwc2_hsotg_remove - remove function for hsotg driver
5081  * @hsotg: Programming view of the DWC_otg controller
5082  *
5083  */
dwc2_hsotg_remove(struct dwc2_hsotg * hsotg)5084 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
5085 {
5086 	usb_del_gadget_udc(&hsotg->gadget);
5087 	dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
5088 
5089 	return 0;
5090 }
5091 
dwc2_hsotg_suspend(struct dwc2_hsotg * hsotg)5092 int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
5093 {
5094 	unsigned long flags;
5095 
5096 	if (hsotg->lx_state != DWC2_L0)
5097 		return 0;
5098 
5099 	if (hsotg->driver) {
5100 		int ep;
5101 
5102 		dev_info(hsotg->dev, "suspending usb gadget %s\n",
5103 			 hsotg->driver->driver.name);
5104 
5105 		spin_lock_irqsave(&hsotg->lock, flags);
5106 		if (hsotg->enabled)
5107 			dwc2_hsotg_core_disconnect(hsotg);
5108 		dwc2_hsotg_disconnect(hsotg);
5109 		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5110 		spin_unlock_irqrestore(&hsotg->lock, flags);
5111 
5112 		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
5113 			if (hsotg->eps_in[ep])
5114 				dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
5115 			if (hsotg->eps_out[ep])
5116 				dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
5117 		}
5118 	}
5119 
5120 	return 0;
5121 }
5122 
dwc2_hsotg_resume(struct dwc2_hsotg * hsotg)5123 int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
5124 {
5125 	unsigned long flags;
5126 
5127 	if (hsotg->lx_state == DWC2_L2)
5128 		return 0;
5129 
5130 	if (hsotg->driver) {
5131 		dev_info(hsotg->dev, "resuming usb gadget %s\n",
5132 			 hsotg->driver->driver.name);
5133 
5134 		spin_lock_irqsave(&hsotg->lock, flags);
5135 		dwc2_hsotg_core_init_disconnected(hsotg, false);
5136 		if (hsotg->enabled) {
5137 			/* Enable ACG feature in device mode,if supported */
5138 			dwc2_enable_acg(hsotg);
5139 			dwc2_hsotg_core_connect(hsotg);
5140 		}
5141 		spin_unlock_irqrestore(&hsotg->lock, flags);
5142 	}
5143 
5144 	return 0;
5145 }
5146 
5147 /**
5148  * dwc2_backup_device_registers() - Backup controller device registers.
5149  * When suspending usb bus, registers needs to be backuped
5150  * if controller power is disabled once suspended.
5151  *
5152  * @hsotg: Programming view of the DWC_otg controller
5153  */
dwc2_backup_device_registers(struct dwc2_hsotg * hsotg)5154 int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
5155 {
5156 	struct dwc2_dregs_backup *dr;
5157 	int i;
5158 
5159 	dev_dbg(hsotg->dev, "%s\n", __func__);
5160 
5161 	/* Backup dev regs */
5162 	dr = &hsotg->dr_backup;
5163 
5164 	dr->dcfg = dwc2_readl(hsotg, DCFG);
5165 	dr->dctl = dwc2_readl(hsotg, DCTL);
5166 	dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
5167 	dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
5168 	dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);
5169 
5170 	for (i = 0; i < hsotg->num_of_eps; i++) {
5171 		/* Backup IN EPs */
5172 		dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));
5173 
5174 		/* Ensure DATA PID is correctly configured */
5175 		if (dr->diepctl[i] & DXEPCTL_DPID)
5176 			dr->diepctl[i] |= DXEPCTL_SETD1PID;
5177 		else
5178 			dr->diepctl[i] |= DXEPCTL_SETD0PID;
5179 
5180 		dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
5181 		dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));
5182 
5183 		/* Backup OUT EPs */
5184 		dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));
5185 
5186 		/* Ensure DATA PID is correctly configured */
5187 		if (dr->doepctl[i] & DXEPCTL_DPID)
5188 			dr->doepctl[i] |= DXEPCTL_SETD1PID;
5189 		else
5190 			dr->doepctl[i] |= DXEPCTL_SETD0PID;
5191 
5192 		dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
5193 		dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
5194 		dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
5195 	}
5196 	dr->valid = true;
5197 	return 0;
5198 }
5199 
5200 /**
5201  * dwc2_restore_device_registers() - Restore controller device registers.
5202  * When resuming usb bus, device registers needs to be restored
5203  * if controller power were disabled.
5204  *
5205  * @hsotg: Programming view of the DWC_otg controller
5206  * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
5207  *
5208  * Return: 0 if successful, negative error code otherwise
5209  */
dwc2_restore_device_registers(struct dwc2_hsotg * hsotg,int remote_wakeup)5210 int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
5211 {
5212 	struct dwc2_dregs_backup *dr;
5213 	int i;
5214 
5215 	dev_dbg(hsotg->dev, "%s\n", __func__);
5216 
5217 	/* Restore dev regs */
5218 	dr = &hsotg->dr_backup;
5219 	if (!dr->valid) {
5220 		dev_err(hsotg->dev, "%s: no device registers to restore\n",
5221 			__func__);
5222 		return -EINVAL;
5223 	}
5224 	dr->valid = false;
5225 
5226 	if (!remote_wakeup)
5227 		dwc2_writel(hsotg, dr->dctl, DCTL);
5228 
5229 	dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
5230 	dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
5231 	dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);
5232 
5233 	for (i = 0; i < hsotg->num_of_eps; i++) {
5234 		/* Restore IN EPs */
5235 		dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
5236 		dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
5237 		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
5238 		/** WA for enabled EPx's IN in DDMA mode. On entering to
5239 		 * hibernation wrong value read and saved from DIEPDMAx,
5240 		 * as result BNA interrupt asserted on hibernation exit
5241 		 * by restoring from saved area.
5242 		 */
5243 		if (using_desc_dma(hsotg) &&
5244 		    (dr->diepctl[i] & DXEPCTL_EPENA))
5245 			dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
5246 		dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
5247 		dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
5248 		/* Restore OUT EPs */
5249 		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
5250 		/* WA for enabled EPx's OUT in DDMA mode. On entering to
5251 		 * hibernation wrong value read and saved from DOEPDMAx,
5252 		 * as result BNA interrupt asserted on hibernation exit
5253 		 * by restoring from saved area.
5254 		 */
5255 		if (using_desc_dma(hsotg) &&
5256 		    (dr->doepctl[i] & DXEPCTL_EPENA))
5257 			dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
5258 		dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
5259 		dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
5260 	}
5261 
5262 	return 0;
5263 }
5264 
5265 /**
5266  * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
5267  *
5268  * @hsotg: Programming view of DWC_otg controller
5269  *
5270  */
dwc2_gadget_init_lpm(struct dwc2_hsotg * hsotg)5271 void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
5272 {
5273 	u32 val;
5274 
5275 	if (!hsotg->params.lpm)
5276 		return;
5277 
5278 	val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES;
5279 	val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
5280 	val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
5281 	val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
5282 	val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
5283 	val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL;
5284 	val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
5285 	dwc2_writel(hsotg, val, GLPMCFG);
5286 	dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
5287 
5288 	/* Unmask WKUP_ALERT Interrupt */
5289 	if (hsotg->params.service_interval)
5290 		dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
5291 }
5292 
5293 /**
5294  * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
5295  *
5296  * @hsotg: Programming view of DWC_otg controller
5297  *
5298  */
dwc2_gadget_program_ref_clk(struct dwc2_hsotg * hsotg)5299 void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
5300 {
5301 	u32 val = 0;
5302 
5303 	val |= GREFCLK_REF_CLK_MODE;
5304 	val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
5305 	val |= hsotg->params.sof_cnt_wkup_alert <<
5306 	       GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
5307 
5308 	dwc2_writel(hsotg, val, GREFCLK);
5309 	dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
5310 }
5311 
5312 /**
5313  * dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
5314  *
5315  * @hsotg: Programming view of the DWC_otg controller
5316  *
5317  * Return non-zero if failed to enter to hibernation.
5318  */
dwc2_gadget_enter_hibernation(struct dwc2_hsotg * hsotg)5319 int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
5320 {
5321 	u32 gpwrdn;
5322 	u32 gusbcfg;
5323 	u32 pcgcctl;
5324 	int ret = 0;
5325 
5326 	/* Change to L2(suspend) state */
5327 	hsotg->lx_state = DWC2_L2;
5328 	dev_dbg(hsotg->dev, "Start of hibernation completed\n");
5329 	ret = dwc2_backup_global_registers(hsotg);
5330 	if (ret) {
5331 		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
5332 			__func__);
5333 		return ret;
5334 	}
5335 	ret = dwc2_backup_device_registers(hsotg);
5336 	if (ret) {
5337 		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
5338 			__func__);
5339 		return ret;
5340 	}
5341 
5342 	gpwrdn = GPWRDN_PWRDNRSTN;
5343 	udelay(10);
5344 	gusbcfg = dwc2_readl(hsotg, GUSBCFG);
5345 	if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
5346 		/* ULPI interface */
5347 		gpwrdn |= GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY;
5348 	}
5349 	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5350 	udelay(10);
5351 
5352 	/* Suspend the Phy Clock */
5353 	pcgcctl = dwc2_readl(hsotg, PCGCTL);
5354 	pcgcctl |= PCGCTL_STOPPCLK;
5355 	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5356 	udelay(10);
5357 
5358 	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5359 	gpwrdn |= GPWRDN_PMUACTV;
5360 	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5361 	udelay(10);
5362 
5363 	/* Set flag to indicate that we are in hibernation */
5364 	hsotg->hibernated = 1;
5365 
5366 	/* Enable interrupts from wake up logic */
5367 	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5368 	gpwrdn |= GPWRDN_PMUINTSEL;
5369 	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5370 	udelay(10);
5371 
5372 	/* Unmask device mode interrupts in GPWRDN */
5373 	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5374 	gpwrdn |= GPWRDN_RST_DET_MSK;
5375 	gpwrdn |= GPWRDN_LNSTSCHG_MSK;
5376 	gpwrdn |= GPWRDN_STS_CHGINT_MSK;
5377 	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5378 	udelay(10);
5379 
5380 	/* Enable Power Down Clamp */
5381 	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5382 	gpwrdn |= GPWRDN_PWRDNCLMP;
5383 	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5384 	udelay(10);
5385 
5386 	/* Switch off VDD */
5387 	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5388 	gpwrdn |= GPWRDN_PWRDNSWTCH;
5389 	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5390 	udelay(10);
5391 
5392 	/* Save gpwrdn register for further usage if stschng interrupt */
5393 	hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
5394 	dev_dbg(hsotg->dev, "Hibernation completed\n");
5395 
5396 	return ret;
5397 }
5398 
5399 /**
5400  * dwc2_gadget_exit_hibernation()
5401  * This function is for exiting from Device mode hibernation by host initiated
5402  * resume/reset and device initiated remote-wakeup.
5403  *
5404  * @hsotg: Programming view of the DWC_otg controller
5405  * @rem_wakeup: indicates whether resume is initiated by Device or Host.
5406  * @reset: indicates whether resume is initiated by Reset.
5407  *
5408  * Return non-zero if failed to exit from hibernation.
5409  */
dwc2_gadget_exit_hibernation(struct dwc2_hsotg * hsotg,int rem_wakeup,int reset)5410 int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
5411 				 int rem_wakeup, int reset)
5412 {
5413 	u32 pcgcctl;
5414 	u32 gpwrdn;
5415 	u32 dctl;
5416 	int ret = 0;
5417 	struct dwc2_gregs_backup *gr;
5418 	struct dwc2_dregs_backup *dr;
5419 
5420 	gr = &hsotg->gr_backup;
5421 	dr = &hsotg->dr_backup;
5422 
5423 	if (!hsotg->hibernated) {
5424 		dev_dbg(hsotg->dev, "Already exited from Hibernation\n");
5425 		return 1;
5426 	}
5427 	dev_dbg(hsotg->dev,
5428 		"%s: called with rem_wakeup = %d reset = %d\n",
5429 		__func__, rem_wakeup, reset);
5430 
5431 	dwc2_hib_restore_common(hsotg, rem_wakeup, 0);
5432 
5433 	if (!reset) {
5434 		/* Clear all pending interupts */
5435 		dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5436 	}
5437 
5438 	/* De-assert Restore */
5439 	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5440 	gpwrdn &= ~GPWRDN_RESTORE;
5441 	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5442 	udelay(10);
5443 
5444 	if (!rem_wakeup) {
5445 		pcgcctl = dwc2_readl(hsotg, PCGCTL);
5446 		pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
5447 		dwc2_writel(hsotg, pcgcctl, PCGCTL);
5448 	}
5449 
5450 	/* Restore GUSBCFG, DCFG and DCTL */
5451 	dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
5452 	dwc2_writel(hsotg, dr->dcfg, DCFG);
5453 	dwc2_writel(hsotg, dr->dctl, DCTL);
5454 
5455 	/* On USB Reset, reset device address to zero */
5456 	if (reset)
5457 		dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
5458 
5459 	/* Reset ULPI latch */
5460 	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5461 	gpwrdn &= ~GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY;
5462 	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5463 
5464 	/* De-assert Wakeup Logic */
5465 	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5466 	gpwrdn &= ~GPWRDN_PMUACTV;
5467 	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5468 
5469 	if (rem_wakeup) {
5470 		udelay(10);
5471 		/* Start Remote Wakeup Signaling */
5472 		dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
5473 	} else {
5474 		udelay(50);
5475 		/* Set Device programming done bit */
5476 		dctl = dwc2_readl(hsotg, DCTL);
5477 		dctl |= DCTL_PWRONPRGDONE;
5478 		dwc2_writel(hsotg, dctl, DCTL);
5479 	}
5480 	/* Wait for interrupts which must be cleared */
5481 	mdelay(2);
5482 	/* Clear all pending interupts */
5483 	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5484 
5485 	/* Restore global registers */
5486 	ret = dwc2_restore_global_registers(hsotg);
5487 	if (ret) {
5488 		dev_err(hsotg->dev, "%s: failed to restore registers\n",
5489 			__func__);
5490 		return ret;
5491 	}
5492 
5493 	/* Restore device registers */
5494 	ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
5495 	if (ret) {
5496 		dev_err(hsotg->dev, "%s: failed to restore device registers\n",
5497 			__func__);
5498 		return ret;
5499 	}
5500 
5501 	if (rem_wakeup) {
5502 		mdelay(10);
5503 		dctl = dwc2_readl(hsotg, DCTL);
5504 		dctl &= ~DCTL_RMTWKUPSIG;
5505 		dwc2_writel(hsotg, dctl, DCTL);
5506 	}
5507 
5508 	hsotg->hibernated = 0;
5509 	hsotg->lx_state = DWC2_L0;
5510 	dev_dbg(hsotg->dev, "Hibernation recovery completes here\n");
5511 
5512 	return ret;
5513 }
5514 
5515 /**
5516  * dwc2_gadget_enter_partial_power_down() - Put controller in partial
5517  * power down.
5518  *
5519  * @hsotg: Programming view of the DWC_otg controller
5520  *
5521  * Return: non-zero if failed to enter device partial power down.
5522  *
5523  * This function is for entering device mode partial power down.
5524  */
dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg * hsotg)5525 int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg)
5526 {
5527 	u32 pcgcctl;
5528 	int ret = 0;
5529 
5530 	dev_dbg(hsotg->dev, "Entering device partial power down started.\n");
5531 
5532 	/* Backup all registers */
5533 	ret = dwc2_backup_global_registers(hsotg);
5534 	if (ret) {
5535 		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
5536 			__func__);
5537 		return ret;
5538 	}
5539 
5540 	ret = dwc2_backup_device_registers(hsotg);
5541 	if (ret) {
5542 		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
5543 			__func__);
5544 		return ret;
5545 	}
5546 
5547 	/*
5548 	 * Clear any pending interrupts since dwc2 will not be able to
5549 	 * clear them after entering partial_power_down.
5550 	 */
5551 	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5552 
5553 	/* Put the controller in low power state */
5554 	pcgcctl = dwc2_readl(hsotg, PCGCTL);
5555 
5556 	pcgcctl |= PCGCTL_PWRCLMP;
5557 	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5558 	udelay(5);
5559 
5560 	pcgcctl |= PCGCTL_RSTPDWNMODULE;
5561 	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5562 	udelay(5);
5563 
5564 	pcgcctl |= PCGCTL_STOPPCLK;
5565 	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5566 
5567 	/* Set in_ppd flag to 1 as here core enters suspend. */
5568 	hsotg->in_ppd = 1;
5569 	hsotg->lx_state = DWC2_L2;
5570 
5571 	dev_dbg(hsotg->dev, "Entering device partial power down completed.\n");
5572 
5573 	return ret;
5574 }
5575 
5576 /*
5577  * dwc2_gadget_exit_partial_power_down() - Exit controller from device partial
5578  * power down.
5579  *
5580  * @hsotg: Programming view of the DWC_otg controller
5581  * @restore: indicates whether need to restore the registers or not.
5582  *
5583  * Return: non-zero if failed to exit device partial power down.
5584  *
5585  * This function is for exiting from device mode partial power down.
5586  */
dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg * hsotg,bool restore)5587 int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
5588 					bool restore)
5589 {
5590 	u32 pcgcctl;
5591 	u32 dctl;
5592 	struct dwc2_dregs_backup *dr;
5593 	int ret = 0;
5594 
5595 	dr = &hsotg->dr_backup;
5596 
5597 	dev_dbg(hsotg->dev, "Exiting device partial Power Down started.\n");
5598 
5599 	pcgcctl = dwc2_readl(hsotg, PCGCTL);
5600 	pcgcctl &= ~PCGCTL_STOPPCLK;
5601 	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5602 
5603 	pcgcctl = dwc2_readl(hsotg, PCGCTL);
5604 	pcgcctl &= ~PCGCTL_PWRCLMP;
5605 	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5606 
5607 	pcgcctl = dwc2_readl(hsotg, PCGCTL);
5608 	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
5609 	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5610 
5611 	udelay(100);
5612 	if (restore) {
5613 		ret = dwc2_restore_global_registers(hsotg);
5614 		if (ret) {
5615 			dev_err(hsotg->dev, "%s: failed to restore registers\n",
5616 				__func__);
5617 			return ret;
5618 		}
5619 		/* Restore DCFG */
5620 		dwc2_writel(hsotg, dr->dcfg, DCFG);
5621 
5622 		ret = dwc2_restore_device_registers(hsotg, 0);
5623 		if (ret) {
5624 			dev_err(hsotg->dev, "%s: failed to restore device registers\n",
5625 				__func__);
5626 			return ret;
5627 		}
5628 	}
5629 
5630 	/* Set the Power-On Programming done bit */
5631 	dctl = dwc2_readl(hsotg, DCTL);
5632 	dctl |= DCTL_PWRONPRGDONE;
5633 	dwc2_writel(hsotg, dctl, DCTL);
5634 
5635 	/* Set in_ppd flag to 0 as here core exits from suspend. */
5636 	hsotg->in_ppd = 0;
5637 	hsotg->lx_state = DWC2_L0;
5638 
5639 	dev_dbg(hsotg->dev, "Exiting device partial Power Down completed.\n");
5640 	return ret;
5641 }
5642 
5643 /**
5644  * dwc2_gadget_enter_clock_gating() - Put controller in clock gating.
5645  *
5646  * @hsotg: Programming view of the DWC_otg controller
5647  *
5648  * Return: non-zero if failed to enter device partial power down.
5649  *
5650  * This function is for entering device mode clock gating.
5651  */
dwc2_gadget_enter_clock_gating(struct dwc2_hsotg * hsotg)5652 void dwc2_gadget_enter_clock_gating(struct dwc2_hsotg *hsotg)
5653 {
5654 	u32 pcgctl;
5655 
5656 	dev_dbg(hsotg->dev, "Entering device clock gating.\n");
5657 
5658 	/* Set the Phy Clock bit as suspend is received. */
5659 	pcgctl = dwc2_readl(hsotg, PCGCTL);
5660 	pcgctl |= PCGCTL_STOPPCLK;
5661 	dwc2_writel(hsotg, pcgctl, PCGCTL);
5662 	udelay(5);
5663 
5664 	/* Set the Gate hclk as suspend is received. */
5665 	pcgctl = dwc2_readl(hsotg, PCGCTL);
5666 	pcgctl |= PCGCTL_GATEHCLK;
5667 	dwc2_writel(hsotg, pcgctl, PCGCTL);
5668 	udelay(5);
5669 
5670 	hsotg->lx_state = DWC2_L2;
5671 	hsotg->bus_suspended = true;
5672 }
5673 
5674 /*
5675  * dwc2_gadget_exit_clock_gating() - Exit controller from device clock gating.
5676  *
5677  * @hsotg: Programming view of the DWC_otg controller
5678  * @rem_wakeup: indicates whether remote wake up is enabled.
5679  *
5680  * This function is for exiting from device mode clock gating.
5681  */
dwc2_gadget_exit_clock_gating(struct dwc2_hsotg * hsotg,int rem_wakeup)5682 void dwc2_gadget_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup)
5683 {
5684 	u32 pcgctl;
5685 	u32 dctl;
5686 
5687 	dev_dbg(hsotg->dev, "Exiting device clock gating.\n");
5688 
5689 	/* Clear the Gate hclk. */
5690 	pcgctl = dwc2_readl(hsotg, PCGCTL);
5691 	pcgctl &= ~PCGCTL_GATEHCLK;
5692 	dwc2_writel(hsotg, pcgctl, PCGCTL);
5693 	udelay(5);
5694 
5695 	/* Phy Clock bit. */
5696 	pcgctl = dwc2_readl(hsotg, PCGCTL);
5697 	pcgctl &= ~PCGCTL_STOPPCLK;
5698 	dwc2_writel(hsotg, pcgctl, PCGCTL);
5699 	udelay(5);
5700 
5701 	if (rem_wakeup) {
5702 		/* Set Remote Wakeup Signaling */
5703 		dctl = dwc2_readl(hsotg, DCTL);
5704 		dctl |= DCTL_RMTWKUPSIG;
5705 		dwc2_writel(hsotg, dctl, DCTL);
5706 	}
5707 
5708 	/* Change to L0 state */
5709 	call_gadget(hsotg, resume);
5710 	hsotg->lx_state = DWC2_L0;
5711 	hsotg->bus_suspended = false;
5712 }
5713