xref: /linux/drivers/usb/dwc3/gadget.c (revision 93df8a1ed6231727c5db94a80b1a6bd5ee67cec3)
1 /**
2  * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3  *
4  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5  *
6  * Authors: Felipe Balbi <balbi@ti.com>,
7  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8  *
9  * This program is free software: you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2  of
11  * the License as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29 
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32 
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37 
38 /**
39  * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40  * @dwc: pointer to our context structure
41  * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
42  *
43  * Caller should take care of locking. This function will
44  * return 0 on success or -EINVAL if an invalid Test Selector
45  * is passed.
46  */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 	u32		reg;
50 
51 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53 
54 	switch (mode) {
55 	case TEST_J:
56 	case TEST_K:
57 	case TEST_SE0_NAK:
58 	case TEST_PACKET:
59 	case TEST_FORCE_EN:
60 		reg |= mode << 1;
61 		break;
62 	default:
63 		return -EINVAL;
64 	}
65 
66 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67 
68 	return 0;
69 }
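/*
 * Editorial sketch (not part of the driver): one way a caller can satisfy
 * the locking requirement stated in the kernel-doc above, i.e. take
 * dwc->lock itself before calling the helper. The real caller is the ep0
 * TEST_MODE handling in ep0.c.
 */
#if 0	/* illustrative only */
static int example_enter_test_packet(struct dwc3 *dwc)
{
	unsigned long	flags;
	int		ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
#endif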
70 
71 /**
72  * dwc3_gadget_get_link_state - Gets current state of USB Link
73  * @dwc: pointer to our context structure
74  *
75  * Caller should take care of locking. This function
76  * returns the current USB link state.
77  */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 	u32		reg;
81 
82 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83 
84 	return DWC3_DSTS_USBLNKST(reg);
85 }
86 
87 /**
88  * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89  * @dwc: pointer to our context structure
90  * @state: the state to put link into
91  *
92  * Caller should take care of locking. This function will
93  * return 0 on success or -ETIMEDOUT.
94  */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 	int		retries = 10000;
98 	u32		reg;
99 
100 	/*
101 	 * Wait until device controller is ready. Only applies to 1.94a and
102 	 * later RTL.
103 	 */
104 	if (dwc->revision >= DWC3_REVISION_194A) {
105 		while (--retries) {
106 			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 			if (reg & DWC3_DSTS_DCNRD)
108 				udelay(5);
109 			else
110 				break;
111 		}
112 
113 		if (retries <= 0)
114 			return -ETIMEDOUT;
115 	}
116 
117 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119 
120 	/* set requested state */
121 	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123 
124 	/*
125 	 * The following code is racy when called from dwc3_gadget_wakeup,
126 	 * and is not needed, at least on newer versions
127 	 */
128 	if (dwc->revision >= DWC3_REVISION_194A)
129 		return 0;
130 
131 	/* wait for a change in DSTS */
132 	retries = 10000;
133 	while (--retries) {
134 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135 
136 		if (DWC3_DSTS_USBLNKST(reg) == state)
137 			return 0;
138 
139 		udelay(5);
140 	}
141 
142 	dwc3_trace(trace_dwc3_gadget,
143 			"link state change request timed out");
144 
145 	return -ETIMEDOUT;
146 }
147 
148 /**
149  * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
150  * @dwc: pointer to our context structure
151  *
152  * This function will do a best-effort FIFO allocation in order
153  * to improve FIFO usage and throughput, while still allowing
154  * us to enable as many endpoints as possible.
155  *
156  * Keep in mind that this operation will be highly dependent
157  * on the configured size for RAM1 - which contains the TxFIFOs -,
158  * the number of endpoints enabled in the coreConsultant tool, and
159  * the width of the Master Bus.
160  *
161  * In the ideal world, we would always be able to satisfy the
162  * following equation:
163  *
164  * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
165  * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
166  *
167  * Unfortunately, due to many variables that's not always the case.
168  */
169 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
170 {
171 	int		last_fifo_depth = 0;
172 	int		ram1_depth;
173 	int		fifo_size;
174 	int		mdwidth;
175 	int		num;
176 
177 	if (!dwc->needs_fifo_resize)
178 		return 0;
179 
180 	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
181 	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
182 
183 	/* MDWIDTH is represented in bits, we need it in bytes */
184 	mdwidth >>= 3;
185 
186 	/*
187 	 * FIXME For now we will only allocate 1 wMaxPacketSize space
188 	 * for each enabled endpoint, later patches will come to
189 	 * improve this algorithm so that we better use the internal
190 	 * FIFO space
191 	 */
192 	for (num = 0; num < dwc->num_in_eps; num++) {
193 		/* bit0 indicates direction; 1 means IN ep */
194 		struct dwc3_ep	*dep = dwc->eps[(num << 1) | 1];
195 		int		mult = 1;
196 		int		tmp;
197 
198 		if (!(dep->flags & DWC3_EP_ENABLED))
199 			continue;
200 
201 		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
202 				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
203 			mult = 3;
204 
205 		/*
206 		 * REVISIT: the following assumes we will always have enough
207 		 * space available on the FIFO RAM for all possible use cases.
208 		 * Make sure that's true somehow and change FIFO allocation
209 		 * accordingly.
210 		 *
211 		 * If we have Bulk or Isochronous endpoints, we want
212 		 * them to be able to be very, very fast. So we're giving
213 		 * those endpoints a fifo_size which is enough for 3 full
214 		 * packets
215 		 */
216 		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
217 		tmp += mdwidth;
218 
219 		fifo_size = DIV_ROUND_UP(tmp, mdwidth);
220 
221 		fifo_size |= (last_fifo_depth << 16);
222 
223 		dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
224 				dep->name, last_fifo_depth, fifo_size & 0xffff);
225 
226 		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
227 
228 		last_fifo_depth += (fifo_size & 0xffff);
229 	}
230 
231 	return 0;
232 }
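/*
 * Editorial worked example (not part of the driver), assuming a 64-bit
 * master bus (mdwidth = 8 bytes) and a 1024-byte bulk endpoint: the loop
 * above reserves mult * (maxpacket + mdwidth) + mdwidth =
 * 3 * (1024 + 8) + 8 = 3104 bytes, i.e. DIV_ROUND_UP(3104, 8) = 388 RAM
 * words. That word count lands in the lower half of GTXFIFOSIZ while the
 * running FIFO address goes into the upper half.
 */
#if 0	/* illustrative only */
static u32 example_txfifo_words(int maxpacket, int mdwidth, int mult)
{
	int bytes = mult * (maxpacket + mdwidth) + mdwidth;

	return DIV_ROUND_UP(bytes, mdwidth);
}
#endif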
233 
234 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
235 		int status)
236 {
237 	struct dwc3			*dwc = dep->dwc;
238 	int				i;
239 
240 	if (req->queued) {
241 		i = 0;
242 		do {
243 			dep->busy_slot++;
244 			/*
245 			 * Skip LINK TRB. We can't use req->trb and check for
246 			 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
247 			 * just completed (not the LINK TRB).
248 			 */
249 			if (((dep->busy_slot & DWC3_TRB_MASK) ==
250 				DWC3_TRB_NUM - 1) &&
251 				usb_endpoint_xfer_isoc(dep->endpoint.desc))
252 				dep->busy_slot++;
253 		} while (++i < req->request.num_mapped_sgs);
254 		req->queued = false;
255 	}
256 	list_del(&req->list);
257 	req->trb = NULL;
258 
259 	if (req->request.status == -EINPROGRESS)
260 		req->request.status = status;
261 
262 	if (dwc->ep0_bounced && dep->number == 0)
263 		dwc->ep0_bounced = false;
264 	else
265 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
266 				req->direction);
267 
268 	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
269 			req, dep->name, req->request.actual,
270 			req->request.length, status);
271 	trace_dwc3_gadget_giveback(req);
272 
273 	spin_unlock(&dwc->lock);
274 	usb_gadget_giveback_request(&dep->endpoint, &req->request);
275 	spin_lock(&dwc->lock);
276 }
277 
278 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
279 {
280 	u32		timeout = 500;
281 	u32		reg;
282 
283 	trace_dwc3_gadget_generic_cmd(cmd, param);
284 
285 	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
286 	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
287 
288 	do {
289 		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
290 		if (!(reg & DWC3_DGCMD_CMDACT)) {
291 			dwc3_trace(trace_dwc3_gadget,
292 					"Command Complete --> %d",
293 					DWC3_DGCMD_STATUS(reg));
294 			if (DWC3_DGCMD_STATUS(reg))
295 				return -EINVAL;
296 			return 0;
297 		}
298 
299 		/*
300 		 * We can't sleep here, because it's also called from
301 		 * interrupt context.
302 		 */
303 		timeout--;
304 		if (!timeout) {
305 			dwc3_trace(trace_dwc3_gadget,
306 					"Command Timed Out");
307 			return -ETIMEDOUT;
308 		}
309 		udelay(1);
310 	} while (1);
311 }
312 
313 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
314 		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
315 {
316 	struct dwc3_ep		*dep = dwc->eps[ep];
317 	u32			timeout = 500;
318 	u32			reg;
319 
320 	trace_dwc3_gadget_ep_cmd(dep, cmd, params);
321 
322 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
323 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
324 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
325 
326 	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
327 	do {
328 		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
329 		if (!(reg & DWC3_DEPCMD_CMDACT)) {
330 			dwc3_trace(trace_dwc3_gadget,
331 					"Command Complete --> %d",
332 					DWC3_DEPCMD_STATUS(reg));
333 			if (DWC3_DEPCMD_STATUS(reg))
334 				return -EINVAL;
335 			return 0;
336 		}
337 
338 		/*
339 		 * We can't sleep here, because it is also called from
340 		 * interrupt context.
341 		 */
342 		timeout--;
343 		if (!timeout) {
344 			dwc3_trace(trace_dwc3_gadget,
345 					"Command Timed Out");
346 			return -ETIMEDOUT;
347 		}
348 
349 		udelay(1);
350 	} while (1);
351 }
352 
353 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
354 		struct dwc3_trb *trb)
355 {
356 	u32		offset = (char *) trb - (char *) dep->trb_pool;
357 
358 	return dep->trb_pool_dma + offset;
359 }
360 
361 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
362 {
363 	struct dwc3		*dwc = dep->dwc;
364 
365 	if (dep->trb_pool)
366 		return 0;
367 
368 	dep->trb_pool = dma_alloc_coherent(dwc->dev,
369 			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
370 			&dep->trb_pool_dma, GFP_KERNEL);
371 	if (!dep->trb_pool) {
372 		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
373 				dep->name);
374 		return -ENOMEM;
375 	}
376 
377 	return 0;
378 }
379 
380 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
381 {
382 	struct dwc3		*dwc = dep->dwc;
383 
384 	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
385 			dep->trb_pool, dep->trb_pool_dma);
386 
387 	dep->trb_pool = NULL;
388 	dep->trb_pool_dma = 0;
389 }
390 
391 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
392 {
393 	struct dwc3_gadget_ep_cmd_params params;
394 	u32			cmd;
395 
396 	memset(&params, 0x00, sizeof(params));
397 
398 	if (dep->number != 1) {
399 		cmd = DWC3_DEPCMD_DEPSTARTCFG;
400 		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
401 		if (dep->number > 1) {
402 			if (dwc->start_config_issued)
403 				return 0;
404 			dwc->start_config_issued = true;
405 			cmd |= DWC3_DEPCMD_PARAM(2);
406 		}
407 
408 		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
409 	}
410 
411 	return 0;
412 }
413 
414 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
415 		const struct usb_endpoint_descriptor *desc,
416 		const struct usb_ss_ep_comp_descriptor *comp_desc,
417 		bool ignore, bool restore)
418 {
419 	struct dwc3_gadget_ep_cmd_params params;
420 
421 	memset(&params, 0x00, sizeof(params));
422 
423 	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
424 		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
425 
426 	/* Burst size is only needed in SuperSpeed mode */
427 	if (dwc->gadget.speed == USB_SPEED_SUPER) {
428 		u32 burst = dep->endpoint.maxburst - 1;
429 
430 		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
431 	}
432 
433 	if (ignore)
434 		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
435 
436 	if (restore) {
437 		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
438 		params.param2 |= dep->saved_state;
439 	}
440 
441 	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
442 		| DWC3_DEPCFG_XFER_NOT_READY_EN;
443 
444 	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
445 		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
446 			| DWC3_DEPCFG_STREAM_EVENT_EN;
447 		dep->stream_capable = true;
448 	}
449 
450 	if (!usb_endpoint_xfer_control(desc))
451 		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
452 
453 	/*
454 	 * We are doing 1:1 mapping for endpoints, meaning
455 	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
456 	 * so on. We consider the direction bit as part of the physical
457 	 * endpoint number. So USB endpoint 0x81 is 0x03.
458 	 */
459 	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
460 
461 	/*
462 	 * We must use the lower 16 TX FIFOs even though
463 	 * HW might have more
464 	 */
465 	if (dep->direction)
466 		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
467 
468 	if (desc->bInterval) {
469 		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
470 		dep->interval = 1 << (desc->bInterval - 1);
471 	}
472 
473 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
474 			DWC3_DEPCMD_SETEPCONFIG, &params);
475 }
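/*
 * Editorial sketch (not part of the driver): the 1:1 mapping described in
 * dwc3_gadget_set_ep_config() means the physical endpoint number is the
 * logical endpoint number shifted left by one, with the direction bit in
 * bit 0. So USB endpoint 0x81 (EP1 IN) is physical endpoint 3.
 */
#if 0	/* illustrative only */
static u8 example_phys_epnum(u8 bEndpointAddress)
{
	u8 num = bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	return (num << 1) | !!(bEndpointAddress & USB_DIR_IN);
}
#endif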
476 
477 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
478 {
479 	struct dwc3_gadget_ep_cmd_params params;
480 
481 	memset(&params, 0x00, sizeof(params));
482 
483 	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
484 
485 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
486 			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
487 }
488 
489 /**
490  * __dwc3_gadget_ep_enable - Initializes a HW endpoint
491  * @dep: endpoint to be initialized
492  * @desc: USB Endpoint Descriptor
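 * @comp_desc: USB SuperSpeed Endpoint Companion Descriptor, if any
 * @ignore: when true, set the Ignore Sequence Number flag in DEPCFG
 * @restore: when true, restore the endpoint configuration saved in
 *	dep->saved_state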
493  *
494  * Caller should take care of locking
495  */
496 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
497 		const struct usb_endpoint_descriptor *desc,
498 		const struct usb_ss_ep_comp_descriptor *comp_desc,
499 		bool ignore, bool restore)
500 {
501 	struct dwc3		*dwc = dep->dwc;
502 	u32			reg;
503 	int			ret;
504 
505 	dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
506 
507 	if (!(dep->flags & DWC3_EP_ENABLED)) {
508 		ret = dwc3_gadget_start_config(dwc, dep);
509 		if (ret)
510 			return ret;
511 	}
512 
513 	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
514 			restore);
515 	if (ret)
516 		return ret;
517 
518 	if (!(dep->flags & DWC3_EP_ENABLED)) {
519 		struct dwc3_trb	*trb_st_hw;
520 		struct dwc3_trb	*trb_link;
521 
522 		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
523 		if (ret)
524 			return ret;
525 
526 		dep->endpoint.desc = desc;
527 		dep->comp_desc = comp_desc;
528 		dep->type = usb_endpoint_type(desc);
529 		dep->flags |= DWC3_EP_ENABLED;
530 
531 		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
532 		reg |= DWC3_DALEPENA_EP(dep->number);
533 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
534 
535 		if (!usb_endpoint_xfer_isoc(desc))
536 			return 0;
537 
538 		/* Link TRB for ISOC. The HWO bit is never reset */
539 		trb_st_hw = &dep->trb_pool[0];
540 
541 		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
542 		memset(trb_link, 0, sizeof(*trb_link));
543 
544 		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
545 		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
546 		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
547 		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
548 	}
549 
550 	return 0;
551 }
552 
553 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
554 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
555 {
556 	struct dwc3_request		*req;
557 
558 	if (!list_empty(&dep->req_queued)) {
559 		dwc3_stop_active_transfer(dwc, dep->number, true);
560 
561 			/* giveback all requests to the gadget driver */
562 		while (!list_empty(&dep->req_queued)) {
563 			req = next_request(&dep->req_queued);
564 
565 			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
566 		}
567 	}
568 
569 	while (!list_empty(&dep->request_list)) {
570 		req = next_request(&dep->request_list);
571 
572 		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
573 	}
574 }
575 
576 /**
577  * __dwc3_gadget_ep_disable - Disables a HW endpoint
578  * @dep: the endpoint to disable
579  *
580  * This function also removes requests which are currently processed by the
581  * hardware and those which are not yet scheduled.
582  * Caller should take care of locking.
583  */
584 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
585 {
586 	struct dwc3		*dwc = dep->dwc;
587 	u32			reg;
588 
589 	dwc3_remove_requests(dwc, dep);
590 
591 	/* make sure HW endpoint isn't stalled */
592 	if (dep->flags & DWC3_EP_STALL)
593 		__dwc3_gadget_ep_set_halt(dep, 0, false);
594 
595 	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
596 	reg &= ~DWC3_DALEPENA_EP(dep->number);
597 	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
598 
599 	dep->stream_capable = false;
600 	dep->endpoint.desc = NULL;
601 	dep->comp_desc = NULL;
602 	dep->type = 0;
603 	dep->flags = 0;
604 
605 	return 0;
606 }
607 
608 /* -------------------------------------------------------------------------- */
609 
610 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
611 		const struct usb_endpoint_descriptor *desc)
612 {
613 	return -EINVAL;
614 }
615 
616 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
617 {
618 	return -EINVAL;
619 }
620 
621 /* -------------------------------------------------------------------------- */
622 
623 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
624 		const struct usb_endpoint_descriptor *desc)
625 {
626 	struct dwc3_ep			*dep;
627 	struct dwc3			*dwc;
628 	unsigned long			flags;
629 	int				ret;
630 
631 	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
632 		pr_debug("dwc3: invalid parameters\n");
633 		return -EINVAL;
634 	}
635 
636 	if (!desc->wMaxPacketSize) {
637 		pr_debug("dwc3: missing wMaxPacketSize\n");
638 		return -EINVAL;
639 	}
640 
641 	dep = to_dwc3_ep(ep);
642 	dwc = dep->dwc;
643 
644 	if (dep->flags & DWC3_EP_ENABLED) {
645 		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
646 				dep->name);
647 		return 0;
648 	}
649 
650 	switch (usb_endpoint_type(desc)) {
651 	case USB_ENDPOINT_XFER_CONTROL:
652 		strlcat(dep->name, "-control", sizeof(dep->name));
653 		break;
654 	case USB_ENDPOINT_XFER_ISOC:
655 		strlcat(dep->name, "-isoc", sizeof(dep->name));
656 		break;
657 	case USB_ENDPOINT_XFER_BULK:
658 		strlcat(dep->name, "-bulk", sizeof(dep->name));
659 		break;
660 	case USB_ENDPOINT_XFER_INT:
661 		strlcat(dep->name, "-int", sizeof(dep->name));
662 		break;
663 	default:
664 		dev_err(dwc->dev, "invalid endpoint transfer type\n");
665 	}
666 
667 	spin_lock_irqsave(&dwc->lock, flags);
668 	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
669 	spin_unlock_irqrestore(&dwc->lock, flags);
670 
671 	return ret;
672 }
673 
674 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
675 {
676 	struct dwc3_ep			*dep;
677 	struct dwc3			*dwc;
678 	unsigned long			flags;
679 	int				ret;
680 
681 	if (!ep) {
682 		pr_debug("dwc3: invalid parameters\n");
683 		return -EINVAL;
684 	}
685 
686 	dep = to_dwc3_ep(ep);
687 	dwc = dep->dwc;
688 
689 	if (!(dep->flags & DWC3_EP_ENABLED)) {
690 		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
691 				dep->name);
692 		return 0;
693 	}
694 
695 	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
696 			dep->number >> 1,
697 			(dep->number & 1) ? "in" : "out");
698 
699 	spin_lock_irqsave(&dwc->lock, flags);
700 	ret = __dwc3_gadget_ep_disable(dep);
701 	spin_unlock_irqrestore(&dwc->lock, flags);
702 
703 	return ret;
704 }
705 
706 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
707 	gfp_t gfp_flags)
708 {
709 	struct dwc3_request		*req;
710 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
711 
712 	req = kzalloc(sizeof(*req), gfp_flags);
713 	if (!req)
714 		return NULL;
715 
716 	req->epnum	= dep->number;
717 	req->dep	= dep;
718 
719 	trace_dwc3_alloc_request(req);
720 
721 	return &req->request;
722 }
723 
724 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
725 		struct usb_request *request)
726 {
727 	struct dwc3_request		*req = to_dwc3_request(request);
728 
729 	trace_dwc3_free_request(req);
730 	kfree(req);
731 }
732 
733 /**
734  * dwc3_prepare_one_trb - setup one TRB from one request
735  * @dep: endpoint for which this request is prepared
736  * @req: dwc3_request pointer
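 * @dma: DMA address of the buffer (or of this scatterlist segment)
 * @length: transfer length for this TRB, in bytes
 * @last: true if this TRB completes the transfer (sets LST on non-isoc EPs)
 * @chain: true if more TRBs follow for the same request (sets the CHN bit)
 * @node: index of this TRB within the request, used to pick the ISOC TRB type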
737  */
738 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
739 		struct dwc3_request *req, dma_addr_t dma,
740 		unsigned length, unsigned last, unsigned chain, unsigned node)
741 {
742 	struct dwc3_trb		*trb;
743 
744 	dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
745 			dep->name, req, (unsigned long long) dma,
746 			length, last ? " last" : "",
747 			chain ? " chain" : "");
748 
749 
750 	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
751 
752 	if (!req->trb) {
753 		dwc3_gadget_move_request_queued(req);
754 		req->trb = trb;
755 		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
756 		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
757 	}
758 
759 	dep->free_slot++;
760 	/* Skip the LINK-TRB on ISOC */
761 	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
762 			usb_endpoint_xfer_isoc(dep->endpoint.desc))
763 		dep->free_slot++;
764 
765 	trb->size = DWC3_TRB_SIZE_LENGTH(length);
766 	trb->bpl = lower_32_bits(dma);
767 	trb->bph = upper_32_bits(dma);
768 
769 	switch (usb_endpoint_type(dep->endpoint.desc)) {
770 	case USB_ENDPOINT_XFER_CONTROL:
771 		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
772 		break;
773 
774 	case USB_ENDPOINT_XFER_ISOC:
775 		if (!node)
776 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
777 		else
778 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
779 		break;
780 
781 	case USB_ENDPOINT_XFER_BULK:
782 	case USB_ENDPOINT_XFER_INT:
783 		trb->ctrl = DWC3_TRBCTL_NORMAL;
784 		break;
785 	default:
786 		/*
787 		 * This is only possible with faulty memory because we
788 		 * checked it already :)
789 		 */
790 		BUG();
791 	}
792 
793 	if (!req->request.no_interrupt && !chain)
794 		trb->ctrl |= DWC3_TRB_CTRL_IOC;
795 
796 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
797 		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
798 		trb->ctrl |= DWC3_TRB_CTRL_CSP;
799 	} else if (last) {
800 		trb->ctrl |= DWC3_TRB_CTRL_LST;
801 	}
802 
803 	if (chain)
804 		trb->ctrl |= DWC3_TRB_CTRL_CHN;
805 
806 	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
807 		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
808 
809 	trb->ctrl |= DWC3_TRB_CTRL_HWO;
810 
811 	trace_dwc3_prepare_trb(dep, trb);
812 }
813 
814 /*
815  * dwc3_prepare_trbs - setup TRBs from requests
816  * @dep: endpoint for which requests are being prepared
817  * @starting: true if the endpoint is idle and no requests are queued.
818  *
819  * The function goes through the requests list and sets up TRBs for the
820  * transfers. The function returns once there are no more TRBs available or
821  * it runs out of requests.
822  */
823 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
824 {
825 	struct dwc3_request	*req, *n;
826 	u32			trbs_left;
827 	u32			max;
828 	unsigned int		last_one = 0;
829 
830 	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
831 
832 	/* the first request must not be queued */
833 	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
834 
835 	/* Can't wrap around on a non-isoc EP since there's no link TRB */
836 	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
837 		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
838 		if (trbs_left > max)
839 			trbs_left = max;
840 	}
841 
842 	/*
843 	 * If busy and free slots are equal, then the ring is either full or
844 	 * empty. If we are starting to process requests, it is empty.
845 	 * Otherwise it is full and we don't do anything.
846 	 */
847 	if (!trbs_left) {
848 		if (!starting)
849 			return;
850 		trbs_left = DWC3_TRB_NUM;
851 		/*
852 		 * In case we start from scratch, we queue the ISOC requests
853 		 * starting from slot 1. This is done because we use ring
854 		 * buffer and have no LST bit to stop us. Instead, we place
855 		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
856 		 * after the first request so we start at slot 1 and have
857 		 * 7 requests proceed before we hit the first IOC.
858 		 * Other transfer types don't use the ring buffer and are
859 		 * processed from the first TRB until the last one. Since we
860 		 * don't wrap around we have to start at the beginning.
861 		 */
862 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
863 			dep->busy_slot = 1;
864 			dep->free_slot = 1;
865 		} else {
866 			dep->busy_slot = 0;
867 			dep->free_slot = 0;
868 		}
869 	}
870 
871 	/* The last TRB is a link TRB, not used for xfer */
872 	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
873 		return;
874 
875 	list_for_each_entry_safe(req, n, &dep->request_list, list) {
876 		unsigned	length;
877 		dma_addr_t	dma;
878 		last_one = false;
879 
880 		if (req->request.num_mapped_sgs > 0) {
881 			struct usb_request *request = &req->request;
882 			struct scatterlist *sg = request->sg;
883 			struct scatterlist *s;
884 			int		i;
885 
886 			for_each_sg(sg, s, request->num_mapped_sgs, i) {
887 				unsigned chain = true;
888 
889 				length = sg_dma_len(s);
890 				dma = sg_dma_address(s);
891 
892 				if (i == (request->num_mapped_sgs - 1) ||
893 						sg_is_last(s)) {
894 					if (list_empty(&dep->request_list))
895 						last_one = true;
896 					chain = false;
897 				}
898 
899 				trbs_left--;
900 				if (!trbs_left)
901 					last_one = true;
902 
903 				if (last_one)
904 					chain = false;
905 
906 				dwc3_prepare_one_trb(dep, req, dma, length,
907 						last_one, chain, i);
908 
909 				if (last_one)
910 					break;
911 			}
912 
913 			if (last_one)
914 				break;
915 		} else {
916 			dma = req->request.dma;
917 			length = req->request.length;
918 			trbs_left--;
919 
920 			if (!trbs_left)
921 				last_one = 1;
922 
923 			/* Is this the last request? */
924 			if (list_is_last(&req->list, &dep->request_list))
925 				last_one = 1;
926 
927 			dwc3_prepare_one_trb(dep, req, dma, length,
928 					last_one, false, 0);
929 
930 			if (last_one)
931 				break;
932 		}
933 	}
934 }
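/*
 * Editorial sketch (not part of the driver): busy_slot and free_slot are
 * free-running counters, so the number of TRBs that can still be queued is
 * their difference masked by the pool size, exactly as computed at the top
 * of dwc3_prepare_trbs(). A result of zero means the ring is either
 * completely full or completely empty, which is why the 'starting' flag is
 * needed above.
 */
#if 0	/* illustrative only */
static u32 example_trbs_left(u32 busy_slot, u32 free_slot)
{
	/* same expression as in dwc3_prepare_trbs() */
	return (busy_slot - free_slot) & DWC3_TRB_MASK;
}
#endif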
935 
936 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
937 		int start_new)
938 {
939 	struct dwc3_gadget_ep_cmd_params params;
940 	struct dwc3_request		*req;
941 	struct dwc3			*dwc = dep->dwc;
942 	int				ret;
943 	u32				cmd;
944 
945 	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
946 		dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
947 		return -EBUSY;
948 	}
949 	dep->flags &= ~DWC3_EP_PENDING_REQUEST;
950 
951 	/*
952 	 * If we are getting here after a short-out-packet we don't enqueue any
953 	 * new requests as we try to set the IOC bit only on the last request.
954 	 */
955 	if (start_new) {
956 		if (list_empty(&dep->req_queued))
957 			dwc3_prepare_trbs(dep, start_new);
958 
959 		/* req points to the first request which will be sent */
960 		req = next_request(&dep->req_queued);
961 	} else {
962 		dwc3_prepare_trbs(dep, start_new);
963 
964 		/*
965 		 * req points to the first request where HWO changed from 0 to 1
966 		 */
967 		req = next_request(&dep->req_queued);
968 	}
969 	if (!req) {
970 		dep->flags |= DWC3_EP_PENDING_REQUEST;
971 		return 0;
972 	}
973 
974 	memset(&params, 0, sizeof(params));
975 
976 	if (start_new) {
977 		params.param0 = upper_32_bits(req->trb_dma);
978 		params.param1 = lower_32_bits(req->trb_dma);
979 		cmd = DWC3_DEPCMD_STARTTRANSFER;
980 	} else {
981 		cmd = DWC3_DEPCMD_UPDATETRANSFER;
982 	}
983 
984 	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
985 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
986 	if (ret < 0) {
987 		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
988 
989 		/*
990 		 * FIXME we need to iterate over the list of requests
991 		 * here and stop, unmap, free and del each of the linked
992 		 * requests instead of what we do now.
993 		 */
994 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
995 				req->direction);
996 		list_del(&req->list);
997 		return ret;
998 	}
999 
1000 	dep->flags |= DWC3_EP_BUSY;
1001 
1002 	if (start_new) {
1003 		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
1004 				dep->number);
1005 		WARN_ON_ONCE(!dep->resource_index);
1006 	}
1007 
1008 	return 0;
1009 }
1010 
1011 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1012 		struct dwc3_ep *dep, u32 cur_uf)
1013 {
1014 	u32 uf;
1015 
1016 	if (list_empty(&dep->request_list)) {
1017 		dwc3_trace(trace_dwc3_gadget,
1018 				"ISOC ep %s ran out of requests",
1019 				dep->name);
1020 		dep->flags |= DWC3_EP_PENDING_REQUEST;
1021 		return;
1022 	}
1023 
1024 	/* 4 micro frames in the future */
1025 	uf = cur_uf + dep->interval * 4;
1026 
1027 	__dwc3_gadget_kick_transfer(dep, uf, 1);
1028 }
1029 
1030 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1031 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1032 {
1033 	u32 cur_uf, mask;
1034 
1035 	mask = ~(dep->interval - 1);
1036 	cur_uf = event->parameters & mask;
1037 
1038 	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1039 }
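/*
 * Editorial worked example (not part of the driver): for bInterval = 4,
 * dwc3_gadget_set_ep_config() sets dep->interval = 1 << (4 - 1) = 8
 * microframes. If XferNotReady reports microframe 0x123, the mask above
 * rounds it down to 0x120 and the first ISOC transfer is scheduled at
 * 0x120 + 8 * 4 = 0x140.
 */
#if 0	/* illustrative only */
static u32 example_isoc_start_uf(u32 event_parameters, u32 interval)
{
	u32 cur_uf = event_parameters & ~(interval - 1);

	return cur_uf + interval * 4;	/* e.g. 0x123 -> 0x140 for interval 8 */
}
#endif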
1040 
1041 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1042 {
1043 	struct dwc3		*dwc = dep->dwc;
1044 	int			ret;
1045 
1046 	req->request.actual	= 0;
1047 	req->request.status	= -EINPROGRESS;
1048 	req->direction		= dep->direction;
1049 	req->epnum		= dep->number;
1050 
1051 	/*
1052 	 * We only add to our list of requests now and
1053 	 * start consuming the list once we get XferNotReady
1054 	 * IRQ.
1055 	 *
1056 	 * That way, we avoid doing anything that we don't need
1057 	 * to do now and defer it until the point we receive a
1058 	 * particular token from the Host side.
1059 	 *
1060 	 * This will also avoid Host cancelling URBs due to too
1061 	 * many NAKs.
1062 	 */
1063 	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1064 			dep->direction);
1065 	if (ret)
1066 		return ret;
1067 
1068 	list_add_tail(&req->list, &dep->request_list);
1069 
1070 	/*
1071 	 * There are a few special cases:
1072 	 *
1073 	 * 1. XferNotReady with empty list of requests. We need to kick the
1074 	 *    transfer here in that situation, otherwise we will be NAKing
1075 	 *    forever. If we get XferNotReady before gadget driver has a
1076 	 *    chance to queue a request, we will ACK the IRQ but won't be
1077 	 *    able to receive the data until the next request is queued.
1078 	 *    The following code is handling exactly that.
1079 	 *
1080 	 */
1081 	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1082 		/*
1083 		 * If XferNotReady has already elapsed and this is an
1084 		 * isoc transfer, then issue END TRANSFER, so that we
1085 		 * receive XferNotReady again and regain the notion of
1086 		 * the current microframe.
1087 		 */
1088 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1089 			if (list_empty(&dep->req_queued)) {
1090 				dwc3_stop_active_transfer(dwc, dep->number, true);
1091 				dep->flags = DWC3_EP_ENABLED;
1092 			}
1093 			return 0;
1094 		}
1095 
1096 		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1097 		if (ret && ret != -EBUSY)
1098 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1099 					dep->name);
1100 		return ret;
1101 	}
1102 
1103 	/*
1104 	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1105 	 *    kick the transfer here after queuing a request, otherwise the
1106 	 *    core may not see the modified TRB(s).
1107 	 */
1108 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1109 			(dep->flags & DWC3_EP_BUSY) &&
1110 			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
1111 		WARN_ON_ONCE(!dep->resource_index);
1112 		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1113 				false);
1114 		if (ret && ret != -EBUSY)
1115 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1116 					dep->name);
1117 		return ret;
1118 	}
1119 
1120 	/*
1121 	 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
1122 	 * right away, otherwise the host will not know we have streams to be
1123 	 * handled.
1124 	 */
1125 	if (dep->stream_capable) {
1126 		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1127 		if (ret && ret != -EBUSY)
1128 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1129 					dep->name);
1130 	}
1131 
1132 	return 0;
1133 }
1134 
1135 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1136 	gfp_t gfp_flags)
1137 {
1138 	struct dwc3_request		*req = to_dwc3_request(request);
1139 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1140 	struct dwc3			*dwc = dep->dwc;
1141 
1142 	unsigned long			flags;
1143 
1144 	int				ret;
1145 
1146 	spin_lock_irqsave(&dwc->lock, flags);
1147 	if (!dep->endpoint.desc) {
1148 		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1149 				request, ep->name);
1150 		ret = -ESHUTDOWN;
1151 		goto out;
1152 	}
1153 
1154 	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1155 				request, req->dep->name)) {
1156 		ret = -EINVAL;
1157 		goto out;
1158 	}
1159 
1160 	trace_dwc3_ep_queue(req);
1161 
1162 	ret = __dwc3_gadget_ep_queue(dep, req);
1163 
1164 out:
1165 	spin_unlock_irqrestore(&dwc->lock, flags);
1166 
1167 	return ret;
1168 }
1169 
1170 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1171 		struct usb_request *request)
1172 {
1173 	struct dwc3_request		*req = to_dwc3_request(request);
1174 	struct dwc3_request		*r = NULL;
1175 
1176 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1177 	struct dwc3			*dwc = dep->dwc;
1178 
1179 	unsigned long			flags;
1180 	int				ret = 0;
1181 
1182 	trace_dwc3_ep_dequeue(req);
1183 
1184 	spin_lock_irqsave(&dwc->lock, flags);
1185 
1186 	list_for_each_entry(r, &dep->request_list, list) {
1187 		if (r == req)
1188 			break;
1189 	}
1190 
1191 	if (r != req) {
1192 		list_for_each_entry(r, &dep->req_queued, list) {
1193 			if (r == req)
1194 				break;
1195 		}
1196 		if (r == req) {
1197 			/* wait until it is processed */
1198 			dwc3_stop_active_transfer(dwc, dep->number, true);
1199 			goto out1;
1200 		}
1201 		dev_err(dwc->dev, "request %p was not queued to %s\n",
1202 				request, ep->name);
1203 		ret = -EINVAL;
1204 		goto out0;
1205 	}
1206 
1207 out1:
1208 	/* giveback the request */
1209 	dwc3_gadget_giveback(dep, req, -ECONNRESET);
1210 
1211 out0:
1212 	spin_unlock_irqrestore(&dwc->lock, flags);
1213 
1214 	return ret;
1215 }
1216 
1217 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1218 {
1219 	struct dwc3_gadget_ep_cmd_params	params;
1220 	struct dwc3				*dwc = dep->dwc;
1221 	int					ret;
1222 
1223 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1224 		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1225 		return -EINVAL;
1226 	}
1227 
1228 	memset(&params, 0x00, sizeof(params));
1229 
1230 	if (value) {
1231 		if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1232 				(!list_empty(&dep->req_queued) ||
1233 				 !list_empty(&dep->request_list)))) {
1234 			dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1235 					dep->name);
1236 			return -EAGAIN;
1237 		}
1238 
1239 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1240 			DWC3_DEPCMD_SETSTALL, &params);
1241 		if (ret)
1242 			dev_err(dwc->dev, "failed to set STALL on %s\n",
1243 					dep->name);
1244 		else
1245 			dep->flags |= DWC3_EP_STALL;
1246 	} else {
1247 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1248 			DWC3_DEPCMD_CLEARSTALL, &params);
1249 		if (ret)
1250 			dev_err(dwc->dev, "failed to clear STALL on %s\n",
1251 					dep->name);
1252 		else
1253 			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1254 	}
1255 
1256 	return ret;
1257 }
1258 
1259 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1260 {
1261 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1262 	struct dwc3			*dwc = dep->dwc;
1263 
1264 	unsigned long			flags;
1265 
1266 	int				ret;
1267 
1268 	spin_lock_irqsave(&dwc->lock, flags);
1269 	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1270 	spin_unlock_irqrestore(&dwc->lock, flags);
1271 
1272 	return ret;
1273 }
1274 
1275 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1276 {
1277 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1278 	struct dwc3			*dwc = dep->dwc;
1279 	unsigned long			flags;
1280 	int				ret;
1281 
1282 	spin_lock_irqsave(&dwc->lock, flags);
1283 	dep->flags |= DWC3_EP_WEDGE;
1284 
1285 	if (dep->number == 0 || dep->number == 1)
1286 		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1287 	else
1288 		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1289 	spin_unlock_irqrestore(&dwc->lock, flags);
1290 
1291 	return ret;
1292 }
1293 
1294 /* -------------------------------------------------------------------------- */
1295 
1296 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1297 	.bLength	= USB_DT_ENDPOINT_SIZE,
1298 	.bDescriptorType = USB_DT_ENDPOINT,
1299 	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
1300 };
1301 
1302 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1303 	.enable		= dwc3_gadget_ep0_enable,
1304 	.disable	= dwc3_gadget_ep0_disable,
1305 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1306 	.free_request	= dwc3_gadget_ep_free_request,
1307 	.queue		= dwc3_gadget_ep0_queue,
1308 	.dequeue	= dwc3_gadget_ep_dequeue,
1309 	.set_halt	= dwc3_gadget_ep0_set_halt,
1310 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1311 };
1312 
1313 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1314 	.enable		= dwc3_gadget_ep_enable,
1315 	.disable	= dwc3_gadget_ep_disable,
1316 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1317 	.free_request	= dwc3_gadget_ep_free_request,
1318 	.queue		= dwc3_gadget_ep_queue,
1319 	.dequeue	= dwc3_gadget_ep_dequeue,
1320 	.set_halt	= dwc3_gadget_ep_set_halt,
1321 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1322 };
1323 
1324 /* -------------------------------------------------------------------------- */
1325 
1326 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1327 {
1328 	struct dwc3		*dwc = gadget_to_dwc(g);
1329 	u32			reg;
1330 
1331 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1332 	return DWC3_DSTS_SOFFN(reg);
1333 }
1334 
1335 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1336 {
1337 	struct dwc3		*dwc = gadget_to_dwc(g);
1338 
1339 	unsigned long		timeout;
1340 	unsigned long		flags;
1341 
1342 	u32			reg;
1343 
1344 	int			ret = 0;
1345 
1346 	u8			link_state;
1347 	u8			speed;
1348 
1349 	spin_lock_irqsave(&dwc->lock, flags);
1350 
1351 	/*
1352 	 * According to the Databook, a Remote Wakeup request should
1353 	 * be issued only when the device is in the Early Suspend state.
1354 	 *
1355 	 * We can check that via USB Link State bits in DSTS register.
1356 	 */
1357 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1358 
1359 	speed = reg & DWC3_DSTS_CONNECTSPD;
1360 	if (speed == DWC3_DSTS_SUPERSPEED) {
1361 		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1362 		ret = -EINVAL;
1363 		goto out;
1364 	}
1365 
1366 	link_state = DWC3_DSTS_USBLNKST(reg);
1367 
1368 	switch (link_state) {
1369 	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1370 	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1371 		break;
1372 	default:
1373 		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1374 				link_state);
1375 		ret = -EINVAL;
1376 		goto out;
1377 	}
1378 
1379 	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1380 	if (ret < 0) {
1381 		dev_err(dwc->dev, "failed to put link in Recovery\n");
1382 		goto out;
1383 	}
1384 
1385 	/* Recent versions do this automatically */
1386 	if (dwc->revision < DWC3_REVISION_194A) {
1387 		/* write zeroes to Link Change Request */
1388 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1389 		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1390 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1391 	}
1392 
1393 	/* poll until Link State changes to ON */
1394 	timeout = jiffies + msecs_to_jiffies(100);
1395 
1396 	while (!time_after(jiffies, timeout)) {
1397 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1398 
1399 		/* in HS, means ON */
1400 		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1401 			break;
1402 	}
1403 
1404 	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1405 		dev_err(dwc->dev, "failed to send remote wakeup\n");
1406 		ret = -EINVAL;
1407 	}
1408 
1409 out:
1410 	spin_unlock_irqrestore(&dwc->lock, flags);
1411 
1412 	return ret;
1413 }
1414 
1415 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1416 		int is_selfpowered)
1417 {
1418 	struct dwc3		*dwc = gadget_to_dwc(g);
1419 	unsigned long		flags;
1420 
1421 	spin_lock_irqsave(&dwc->lock, flags);
1422 	g->is_selfpowered = !!is_selfpowered;
1423 	spin_unlock_irqrestore(&dwc->lock, flags);
1424 
1425 	return 0;
1426 }
1427 
1428 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1429 {
1430 	u32			reg;
1431 	u32			timeout = 500;
1432 
1433 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1434 	if (is_on) {
1435 		if (dwc->revision <= DWC3_REVISION_187A) {
1436 			reg &= ~DWC3_DCTL_TRGTULST_MASK;
1437 			reg |= DWC3_DCTL_TRGTULST_RX_DET;
1438 		}
1439 
1440 		if (dwc->revision >= DWC3_REVISION_194A)
1441 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
1442 		reg |= DWC3_DCTL_RUN_STOP;
1443 
1444 		if (dwc->has_hibernation)
1445 			reg |= DWC3_DCTL_KEEP_CONNECT;
1446 
1447 		dwc->pullups_connected = true;
1448 	} else {
1449 		reg &= ~DWC3_DCTL_RUN_STOP;
1450 
1451 		if (dwc->has_hibernation && !suspend)
1452 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
1453 
1454 		dwc->pullups_connected = false;
1455 	}
1456 
1457 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1458 
1459 	do {
1460 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1461 		if (is_on) {
1462 			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1463 				break;
1464 		} else {
1465 			if (reg & DWC3_DSTS_DEVCTRLHLT)
1466 				break;
1467 		}
1468 		timeout--;
1469 		if (!timeout)
1470 			return -ETIMEDOUT;
1471 		udelay(1);
1472 	} while (1);
1473 
1474 	dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1475 			dwc->gadget_driver
1476 			? dwc->gadget_driver->function : "no-function",
1477 			is_on ? "connect" : "disconnect");
1478 
1479 	return 0;
1480 }
1481 
1482 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1483 {
1484 	struct dwc3		*dwc = gadget_to_dwc(g);
1485 	unsigned long		flags;
1486 	int			ret;
1487 
1488 	is_on = !!is_on;
1489 
1490 	spin_lock_irqsave(&dwc->lock, flags);
1491 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
1492 	spin_unlock_irqrestore(&dwc->lock, flags);
1493 
1494 	return ret;
1495 }
1496 
1497 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1498 {
1499 	u32			reg;
1500 
1501 	/* Enable all but Start and End of Frame IRQs */
1502 	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1503 			DWC3_DEVTEN_EVNTOVERFLOWEN |
1504 			DWC3_DEVTEN_CMDCMPLTEN |
1505 			DWC3_DEVTEN_ERRTICERREN |
1506 			DWC3_DEVTEN_WKUPEVTEN |
1507 			DWC3_DEVTEN_ULSTCNGEN |
1508 			DWC3_DEVTEN_CONNECTDONEEN |
1509 			DWC3_DEVTEN_USBRSTEN |
1510 			DWC3_DEVTEN_DISCONNEVTEN);
1511 
1512 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1513 }
1514 
1515 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1516 {
1517 	/* mask all interrupts */
1518 	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1519 }
1520 
1521 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1522 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1523 
1524 static int dwc3_gadget_start(struct usb_gadget *g,
1525 		struct usb_gadget_driver *driver)
1526 {
1527 	struct dwc3		*dwc = gadget_to_dwc(g);
1528 	struct dwc3_ep		*dep;
1529 	unsigned long		flags;
1530 	int			ret = 0;
1531 	int			irq;
1532 	u32			reg;
1533 
1534 	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1535 	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1536 			IRQF_SHARED, "dwc3", dwc);
1537 	if (ret) {
1538 		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1539 				irq, ret);
1540 		goto err0;
1541 	}
1542 
1543 	spin_lock_irqsave(&dwc->lock, flags);
1544 
1545 	if (dwc->gadget_driver) {
1546 		dev_err(dwc->dev, "%s is already bound to %s\n",
1547 				dwc->gadget.name,
1548 				dwc->gadget_driver->driver.name);
1549 		ret = -EBUSY;
1550 		goto err1;
1551 	}
1552 
1553 	dwc->gadget_driver	= driver;
1554 
1555 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1556 	reg &= ~(DWC3_DCFG_SPEED_MASK);
1557 
1558 	/**
1559 	 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1560 	 * which would cause a metastability state on the Run/Stop
1561 	 * bit if we try to force the IP to USB2-only mode.
1562 	 *
1563 	 * Because of that, we cannot configure the IP to any
1564 	 * speed other than SuperSpeed.
1565 	 *
1566 	 * Refers to:
1567 	 *
1568 	 * STAR#9000525659: Clock Domain Crossing on DCTL in
1569 	 * USB 2.0 Mode
1570 	 */
1571 	if (dwc->revision < DWC3_REVISION_220A) {
1572 		reg |= DWC3_DCFG_SUPERSPEED;
1573 	} else {
1574 		switch (dwc->maximum_speed) {
1575 		case USB_SPEED_LOW:
1576 			reg |= DWC3_DSTS_LOWSPEED;
1577 			break;
1578 		case USB_SPEED_FULL:
1579 			reg |= DWC3_DSTS_FULLSPEED1;
1580 			break;
1581 		case USB_SPEED_HIGH:
1582 			reg |= DWC3_DSTS_HIGHSPEED;
1583 			break;
1584 		case USB_SPEED_SUPER:	/* FALLTHROUGH */
1585 		case USB_SPEED_UNKNOWN:	/* FALLTHROUGH */
1586 		default:
1587 			reg |= DWC3_DSTS_SUPERSPEED;
1588 		}
1589 	}
1590 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1591 
1592 	dwc->start_config_issued = false;
1593 
1594 	/* Start with SuperSpeed Default */
1595 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1596 
1597 	dep = dwc->eps[0];
1598 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1599 			false);
1600 	if (ret) {
1601 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1602 		goto err2;
1603 	}
1604 
1605 	dep = dwc->eps[1];
1606 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1607 			false);
1608 	if (ret) {
1609 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1610 		goto err3;
1611 	}
1612 
1613 	/* begin to receive SETUP packets */
1614 	dwc->ep0state = EP0_SETUP_PHASE;
1615 	dwc3_ep0_out_start(dwc);
1616 
1617 	dwc3_gadget_enable_irq(dwc);
1618 
1619 	spin_unlock_irqrestore(&dwc->lock, flags);
1620 
1621 	return 0;
1622 
1623 err3:
1624 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1625 
1626 err2:
1627 	dwc->gadget_driver = NULL;
1628 
1629 err1:
1630 	spin_unlock_irqrestore(&dwc->lock, flags);
1631 
1632 	free_irq(irq, dwc);
1633 
1634 err0:
1635 	return ret;
1636 }
1637 
1638 static int dwc3_gadget_stop(struct usb_gadget *g)
1639 {
1640 	struct dwc3		*dwc = gadget_to_dwc(g);
1641 	unsigned long		flags;
1642 	int			irq;
1643 
1644 	spin_lock_irqsave(&dwc->lock, flags);
1645 
1646 	dwc3_gadget_disable_irq(dwc);
1647 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1648 	__dwc3_gadget_ep_disable(dwc->eps[1]);
1649 
1650 	dwc->gadget_driver	= NULL;
1651 
1652 	spin_unlock_irqrestore(&dwc->lock, flags);
1653 
1654 	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1655 	free_irq(irq, dwc);
1656 
1657 	return 0;
1658 }
1659 
1660 static const struct usb_gadget_ops dwc3_gadget_ops = {
1661 	.get_frame		= dwc3_gadget_get_frame,
1662 	.wakeup			= dwc3_gadget_wakeup,
1663 	.set_selfpowered	= dwc3_gadget_set_selfpowered,
1664 	.pullup			= dwc3_gadget_pullup,
1665 	.udc_start		= dwc3_gadget_start,
1666 	.udc_stop		= dwc3_gadget_stop,
1667 };
1668 
1669 /* -------------------------------------------------------------------------- */
1670 
1671 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1672 		u8 num, u32 direction)
1673 {
1674 	struct dwc3_ep			*dep;
1675 	u8				i;
1676 
1677 	for (i = 0; i < num; i++) {
1678 		u8 epnum = (i << 1) | (!!direction);
1679 
1680 		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1681 		if (!dep)
1682 			return -ENOMEM;
1683 
1684 		dep->dwc = dwc;
1685 		dep->number = epnum;
1686 		dep->direction = !!direction;
1687 		dwc->eps[epnum] = dep;
1688 
1689 		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1690 				(epnum & 1) ? "in" : "out");
1691 
1692 		dep->endpoint.name = dep->name;
1693 
1694 		dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1695 
1696 		if (epnum == 0 || epnum == 1) {
1697 			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1698 			dep->endpoint.maxburst = 1;
1699 			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1700 			if (!epnum)
1701 				dwc->gadget.ep0 = &dep->endpoint;
1702 		} else {
1703 			int		ret;
1704 
1705 			usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1706 			dep->endpoint.max_streams = 15;
1707 			dep->endpoint.ops = &dwc3_gadget_ep_ops;
1708 			list_add_tail(&dep->endpoint.ep_list,
1709 					&dwc->gadget.ep_list);
1710 
1711 			ret = dwc3_alloc_trb_pool(dep);
1712 			if (ret)
1713 				return ret;
1714 		}
1715 
1716 		INIT_LIST_HEAD(&dep->request_list);
1717 		INIT_LIST_HEAD(&dep->req_queued);
1718 	}
1719 
1720 	return 0;
1721 }
1722 
1723 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1724 {
1725 	int				ret;
1726 
1727 	INIT_LIST_HEAD(&dwc->gadget.ep_list);
1728 
1729 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1730 	if (ret < 0) {
1731 		dwc3_trace(trace_dwc3_gadget,
1732 				"failed to allocate OUT endpoints");
1733 		return ret;
1734 	}
1735 
1736 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1737 	if (ret < 0) {
1738 		dwc3_trace(trace_dwc3_gadget,
1739 				"failed to allocate IN endpoints");
1740 		return ret;
1741 	}
1742 
1743 	return 0;
1744 }
1745 
1746 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1747 {
1748 	struct dwc3_ep			*dep;
1749 	u8				epnum;
1750 
1751 	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1752 		dep = dwc->eps[epnum];
1753 		if (!dep)
1754 			continue;
1755 		/*
1756 		 * Physical endpoints 0 and 1 are special; they form the
1757 		 * bi-directional USB endpoint 0.
1758 		 *
1759 		 * For those two physical endpoints, we don't allocate a TRB
1760 		 * pool nor do we add them to the endpoint list. Due to that, we
1761 		 * shouldn't do these two operations otherwise we would end up
1762 		 * with all sorts of bugs when removing dwc3.ko.
1763 		 */
1764 		if (epnum != 0 && epnum != 1) {
1765 			dwc3_free_trb_pool(dep);
1766 			list_del(&dep->endpoint.ep_list);
1767 		}
1768 
1769 		kfree(dep);
1770 	}
1771 }
1772 
1773 /* -------------------------------------------------------------------------- */
1774 
1775 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1776 		struct dwc3_request *req, struct dwc3_trb *trb,
1777 		const struct dwc3_event_depevt *event, int status)
1778 {
1779 	unsigned int		count;
1780 	unsigned int		s_pkt = 0;
1781 	unsigned int		trb_status;
1782 
1783 	trace_dwc3_complete_trb(dep, trb);
1784 
1785 	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1786 		/*
1787 		 * We continue despite the error. There is not much we
1788 		 * can do. If we don't clean it up we loop forever. If
1789 		 * we skip the TRB then it gets overwritten after a
1790 		 * while since we use them in a ring buffer. A BUG()
1791 		 * would help. Let's hope that if this occurs, someone
1792 		 * fixes the root cause instead of looking away :)
1793 		 */
1794 		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1795 				dep->name, trb);
1796 	count = trb->size & DWC3_TRB_SIZE_MASK;
1797 
1798 	if (dep->direction) {
1799 		if (count) {
1800 			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1801 			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1802 				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1803 						dep->name);
1804 				/*
1805 				 * If a missed isoc occurred and there is
1806 				 * no request queued, then issue END
1807 				 * TRANSFER, so that the core generates the
1808 				 * next XferNotReady and we will issue
1809 				 * a fresh START TRANSFER.
1810 				 * If there are still queued requests,
1811 				 * then wait; do not issue either END
1812 				 * or UPDATE TRANSFER, just attach the next
1813 				 * request in the request_list during
1814 				 * giveback. If any future queued request
1815 				 * is successfully transferred, then we
1816 				 * will issue UPDATE TRANSFER for all
1817 				 * requests in the request_list.
1818 				 */
1819 				dep->flags |= DWC3_EP_MISSED_ISOC;
1820 			} else {
1821 				dev_err(dwc->dev, "incomplete IN transfer %s\n",
1822 						dep->name);
1823 				status = -ECONNRESET;
1824 			}
1825 		} else {
1826 			dep->flags &= ~DWC3_EP_MISSED_ISOC;
1827 		}
1828 	} else {
1829 		if (count && (event->status & DEPEVT_STATUS_SHORT))
1830 			s_pkt = 1;
1831 	}
1832 
1833 	/*
1834 	 * We assume here we will always receive the entire data block
1835 	 * which we should receive. Meaning, if we program RX to
1836 	 * receive 4K but we receive only 2K, we assume that's all we
1837 	 * should receive and we simply bounce the request back to the
1838 	 * gadget driver for further processing.
1839 	 */
1840 	req->request.actual += req->request.length - count;
1841 	if (s_pkt)
1842 		return 1;
1843 	if ((event->status & DEPEVT_STATUS_LST) &&
1844 			(trb->ctrl & (DWC3_TRB_CTRL_LST |
1845 				DWC3_TRB_CTRL_HWO)))
1846 		return 1;
1847 	if ((event->status & DEPEVT_STATUS_IOC) &&
1848 			(trb->ctrl & DWC3_TRB_CTRL_IOC))
1849 		return 1;
1850 	return 0;
1851 }
1852 
1853 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1854 		const struct dwc3_event_depevt *event, int status)
1855 {
1856 	struct dwc3_request	*req;
1857 	struct dwc3_trb		*trb;
1858 	unsigned int		slot;
1859 	unsigned int		i;
1860 	int			ret;
1861 
1862 	req = next_request(&dep->req_queued);
1863 	if (!req) {
1864 		WARN_ON_ONCE(1);
1865 		return 1;
1866 	}
1867 	i = 0;
1868 	do {
1869 		slot = req->start_slot + i;
1870 		if ((slot == DWC3_TRB_NUM - 1) &&
1871 				usb_endpoint_xfer_isoc(dep->endpoint.desc))
1872 			slot++;
1873 		slot %= DWC3_TRB_NUM;
1874 		trb = &dep->trb_pool[slot];
1875 
1876 		ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1877 				event, status);
1878 		if (ret)
1879 			break;
1880 	} while (++i < req->request.num_mapped_sgs);
1881 
1882 	dwc3_gadget_giveback(dep, req, status);
1883 
1884 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1885 			list_empty(&dep->req_queued)) {
1886 		if (list_empty(&dep->request_list)) {
1887 			/*
1888 			 * If there is no entry in request list then do
1889 			 * not issue END TRANSFER now. Just set PENDING
1890 			 * flag, so that END TRANSFER is issued when an
1891 			 * entry is added into request list.
1892 			 */
1893 			dep->flags = DWC3_EP_PENDING_REQUEST;
1894 		} else {
1895 			dwc3_stop_active_transfer(dwc, dep->number, true);
1896 			dep->flags = DWC3_EP_ENABLED;
1897 		}
1898 		return 1;
1899 	}
1900 
1901 	return 1;
1902 }
1903 
1904 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1905 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1906 {
1907 	unsigned		status = 0;
1908 	int			clean_busy;
1909 	u32			is_xfer_complete;
1910 
1911 	is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
1912 
1913 	if (event->status & DEPEVT_STATUS_BUSERR)
1914 		status = -ECONNRESET;
1915 
1916 	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1917 	if (clean_busy && (is_xfer_complete ||
1918 				usb_endpoint_xfer_isoc(dep->endpoint.desc)))
1919 		dep->flags &= ~DWC3_EP_BUSY;
1920 
1921 	/*
1922 	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1923 	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1924 	 */
1925 	if (dwc->revision < DWC3_REVISION_183A) {
1926 		u32		reg;
1927 		int		i;
1928 
1929 		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1930 			dep = dwc->eps[i];
1931 
1932 			if (!(dep->flags & DWC3_EP_ENABLED))
1933 				continue;
1934 
1935 			if (!list_empty(&dep->req_queued))
1936 				return;
1937 		}
1938 
1939 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1940 		reg |= dwc->u1u2;
1941 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1942 
1943 		dwc->u1u2 = 0;
1944 	}
1945 }
1946 
1947 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1948 		const struct dwc3_event_depevt *event)
1949 {
1950 	struct dwc3_ep		*dep;
1951 	u8			epnum = event->endpoint_number;
1952 
1953 	dep = dwc->eps[epnum];
1954 
1955 	if (!(dep->flags & DWC3_EP_ENABLED))
1956 		return;
1957 
1958 	if (epnum == 0 || epnum == 1) {
1959 		dwc3_ep0_interrupt(dwc, event);
1960 		return;
1961 	}
1962 
1963 	switch (event->endpoint_event) {
1964 	case DWC3_DEPEVT_XFERCOMPLETE:
1965 		dep->resource_index = 0;
1966 
1967 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1968 			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1969 					dep->name);
1970 			return;
1971 		}
1972 
1973 		dwc3_endpoint_transfer_complete(dwc, dep, event);
1974 		break;
1975 	case DWC3_DEPEVT_XFERINPROGRESS:
1976 		dwc3_endpoint_transfer_complete(dwc, dep, event);
1977 		break;
1978 	case DWC3_DEPEVT_XFERNOTREADY:
1979 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1980 			dwc3_gadget_start_isoc(dwc, dep, event);
1981 		} else {
1982 			int ret;
1983 
1984 			dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
1985 					dep->name, event->status &
1986 					DEPEVT_STATUS_TRANSFER_ACTIVE
1987 					? "Transfer Active"
1988 					: "Transfer Not Active");
1989 
1990 			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1991 			if (!ret || ret == -EBUSY)
1992 				return;
1993 
1994 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1995 					dep->name);
1996 		}
1997 
1998 		break;
1999 	case DWC3_DEPEVT_STREAMEVT:
2000 		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2001 			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2002 					dep->name);
2003 			return;
2004 		}
2005 
2006 		switch (event->status) {
2007 		case DEPEVT_STREAMEVT_FOUND:
2008 			dwc3_trace(trace_dwc3_gadget,
2009 					"Stream %d found and started",
2010 					event->parameters);
2011 
2012 			break;
2013 		case DEPEVT_STREAMEVT_NOTFOUND:
2014 			/* FALLTHROUGH */
2015 		default:
2016 			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2017 		}
2018 		break;
2019 	case DWC3_DEPEVT_RXTXFIFOEVT:
2020 		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2021 		break;
2022 	case DWC3_DEPEVT_EPCMDCMPLT:
2023 		dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2024 		break;
2025 	}
2026 }
2027 
2028 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2029 {
2030 	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2031 		spin_unlock(&dwc->lock);
2032 		dwc->gadget_driver->disconnect(&dwc->gadget);
2033 		spin_lock(&dwc->lock);
2034 	}
2035 }
2036 
2037 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2038 {
2039 	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2040 		spin_unlock(&dwc->lock);
2041 		dwc->gadget_driver->suspend(&dwc->gadget);
2042 		spin_lock(&dwc->lock);
2043 	}
2044 }
2045 
2046 static void dwc3_resume_gadget(struct dwc3 *dwc)
2047 {
2048 	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2049 		spin_unlock(&dwc->lock);
2050 		dwc->gadget_driver->resume(&dwc->gadget);
2051 		spin_lock(&dwc->lock);
2052 	}
2053 }
2054 
2055 static void dwc3_reset_gadget(struct dwc3 *dwc)
2056 {
2057 	if (!dwc->gadget_driver)
2058 		return;
2059 
2060 	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2061 		spin_unlock(&dwc->lock);
2062 		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2063 		spin_lock(&dwc->lock);
2064 	}
2065 }
2066 
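/*
 * dwc3_stop_active_transfer - issue an End Transfer command for an endpoint
 *
 * Does nothing if the endpoint has no active transfer (no resource index).
 * See the NOTICE comment below for why the command completion interrupt is
 * not waited for and an arbitrary 100us delay is used instead.
 */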
2067 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2068 {
2069 	struct dwc3_ep *dep;
2070 	struct dwc3_gadget_ep_cmd_params params;
2071 	u32 cmd;
2072 	int ret;
2073 
2074 	dep = dwc->eps[epnum];
2075 
2076 	if (!dep->resource_index)
2077 		return;
2078 
2079 	/*
2080 	 * NOTICE: We are violating what the Databook says about the
2081 	 * EndTransfer command. Ideally we would _always_ wait for the
2082 	 * EndTransfer Command Completion IRQ, but that's causing too
2083 	 * much trouble synchronizing between us and gadget driver.
2084 	 *
2085 	 * We have discussed this with the IP Provider and it was
2086 	 * suggested to giveback all requests here, but give HW some
2087 	 * extra time to synchronize with the interconnect. We're using
2088 	 * an arbitrary 100us delay for that.
2089 	 *
2090 	 * Note also that a similar handling was tested by Synopsys
2091 	 * (thanks a lot Paul) and nothing bad has come out of it.
2092 	 * In short, what we're doing is:
2093 	 *
2094 	 * - Issue EndTransfer WITH CMDIOC bit set
2095 	 * - Wait 100us
2096 	 */
2097 
2098 	cmd = DWC3_DEPCMD_ENDTRANSFER;
2099 	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2100 	cmd |= DWC3_DEPCMD_CMDIOC;
2101 	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2102 	memset(&params, 0, sizeof(params));
2103 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2104 	WARN_ON_ONCE(ret);
2105 	dep->resource_index = 0;
2106 	dep->flags &= ~DWC3_EP_BUSY;
2107 	udelay(100);
2108 }
2109 
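/*
 * dwc3_stop_active_transfers - remove requests from all non-control endpoints
 *
 * Used during USB reset handling to remove all requests from every enabled
 * endpoint; physical endpoints 0 and 1 (ep0) are skipped here.
 */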
2110 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2111 {
2112 	u32 epnum;
2113 
2114 	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2115 		struct dwc3_ep *dep;
2116 
2117 		dep = dwc->eps[epnum];
2118 		if (!dep)
2119 			continue;
2120 
2121 		if (!(dep->flags & DWC3_EP_ENABLED))
2122 			continue;
2123 
2124 		dwc3_remove_requests(dwc, dep);
2125 	}
2126 }
2127 
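/*
 * dwc3_clear_stall_all_ep - clear the STALL condition on all endpoints
 *
 * Issues a Clear Stall endpoint command for every endpoint which still has
 * DWC3_EP_STALL set and clears the flag; physical endpoint 0 is skipped.
 */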
2128 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2129 {
2130 	u32 epnum;
2131 
2132 	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2133 		struct dwc3_ep *dep;
2134 		struct dwc3_gadget_ep_cmd_params params;
2135 		int ret;
2136 
2137 		dep = dwc->eps[epnum];
2138 		if (!dep)
2139 			continue;
2140 
2141 		if (!(dep->flags & DWC3_EP_STALL))
2142 			continue;
2143 
2144 		dep->flags &= ~DWC3_EP_STALL;
2145 
2146 		memset(&params, 0, sizeof(params));
2147 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2148 				DWC3_DEPCMD_CLEARSTALL, &params);
2149 		WARN_ON_ONCE(ret);
2150 	}
2151 }
2152 
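/*
 * dwc3_gadget_disconnect_interrupt - handle the Disconnect device event
 *
 * Disables U1/U2 initiation in DCTL, notifies the gadget driver of the
 * disconnect and marks the gadget as not attached.
 */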
2153 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2154 {
2155 	u32			reg;
2156 
2157 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2158 	reg &= ~DWC3_DCTL_INITU1ENA;
2159 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2160 
2161 	reg &= ~DWC3_DCTL_INITU2ENA;
2162 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2163 
2164 	dwc3_disconnect_gadget(dwc);
2165 	dwc->start_config_issued = false;
2166 
2167 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
2168 	dwc->setup_packet_pending = false;
2169 	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2170 }
2171 
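/*
 * dwc3_gadget_reset_interrupt - handle the USB Reset device event
 *
 * Notifies the gadget driver of the reset, leaves any active test mode,
 * stops all active transfers, clears stalled endpoints and resets the
 * device address to zero. On revisions < 1.88a a pending setup packet is
 * treated as a missed Disconnect event first (see the workaround comment
 * below).
 */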
2172 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2173 {
2174 	u32			reg;
2175 
2176 	/*
2177 	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2178 	 * would cause a missing Disconnect Event if there's a
2179 	 * pending Setup Packet in the FIFO.
2180 	 *
2181 	 * There's no suggested workaround on the official Bug
2182 	 * report, which states that "unless the driver/application
2183 	 * is doing any special handling of a disconnect event,
2184 	 * there is no functional issue".
2185 	 *
2186 	 * Unfortunately, it turns out that we _do_ some special
2187 	 * handling of a disconnect event, namely complete all
2188 	 * pending transfers, notify gadget driver of the
2189 	 * disconnection, and so on.
2190 	 *
2191 	 * Our suggested workaround is to follow the Disconnect
2192 	 * Event steps here, instead, based on a setup_packet_pending
2193 	 * flag. This flag gets set whenever we have an XferNotReady
2194 	 * event on EP0 and gets cleared on XferComplete for the
2195 	 * same endpoint.
2196 	 *
2197 	 * Refers to:
2198 	 *
2199 	 * STAR#9000466709: RTL: Device : Disconnect event not
2200 	 * generated if setup packet pending in FIFO
2201 	 */
2202 	if (dwc->revision < DWC3_REVISION_188A) {
2203 		if (dwc->setup_packet_pending)
2204 			dwc3_gadget_disconnect_interrupt(dwc);
2205 	}
2206 
2207 	dwc3_reset_gadget(dwc);
2208 
2209 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2210 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2211 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2212 	dwc->test_mode = false;
2213 
2214 	dwc3_stop_active_transfers(dwc);
2215 	dwc3_clear_stall_all_ep(dwc);
2216 	dwc->start_config_issued = false;
2217 
2218 	/* Reset device address to zero */
2219 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2220 	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2221 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2222 }
2223 
2224 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2225 {
2226 	u32 reg;
2227 	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2228 
2229 	/*
2230 	 * We change the clock only at SuperSpeed, though it is not clear why
2231 	 * this is needed. It may become part of the power saving plan.
2232 	 */
2233 
2234 	if (speed != DWC3_DSTS_SUPERSPEED)
2235 		return;
2236 
2237 	/*
2238 	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2239 	 * each time on Connect Done.
2240 	 */
2241 	if (!usb30_clock)
2242 		return;
2243 
2244 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2245 	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2246 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2247 }
2248 
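/*
 * dwc3_gadget_conndone_interrupt - handle the Connection Done device event
 *
 * Reads the negotiated connection speed, programs ep0's maximum packet
 * size and the gadget speed accordingly, advertises USB2 LPM capability
 * where applicable, and finally re-enables both physical endpoints
 * backing ep0.
 */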
2249 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2250 {
2251 	struct dwc3_ep		*dep;
2252 	int			ret;
2253 	u32			reg;
2254 	u8			speed;
2255 
2256 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2257 	speed = reg & DWC3_DSTS_CONNECTSPD;
2258 	dwc->speed = speed;
2259 
2260 	dwc3_update_ram_clk_sel(dwc, speed);
2261 
2262 	switch (speed) {
2263 	case DWC3_DCFG_SUPERSPEED:
2264 		/*
2265 		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2266 		 * would cause a missing USB3 Reset event.
2267 		 *
2268 		 * In such situations, we should force a USB3 Reset
2269 		 * event by calling our dwc3_gadget_reset_interrupt()
2270 		 * routine.
2271 		 *
2272 		 * Refers to:
2273 		 *
2274 		 * STAR#9000483510: RTL: SS : USB3 reset event may
2275 		 * not be generated always when the link enters poll
2276 		 */
2277 		if (dwc->revision < DWC3_REVISION_190A)
2278 			dwc3_gadget_reset_interrupt(dwc);
2279 
2280 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2281 		dwc->gadget.ep0->maxpacket = 512;
2282 		dwc->gadget.speed = USB_SPEED_SUPER;
2283 		break;
2284 	case DWC3_DCFG_HIGHSPEED:
2285 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2286 		dwc->gadget.ep0->maxpacket = 64;
2287 		dwc->gadget.speed = USB_SPEED_HIGH;
2288 		break;
2289 	case DWC3_DCFG_FULLSPEED2:
2290 	case DWC3_DCFG_FULLSPEED1:
2291 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2292 		dwc->gadget.ep0->maxpacket = 64;
2293 		dwc->gadget.speed = USB_SPEED_FULL;
2294 		break;
2295 	case DWC3_DCFG_LOWSPEED:
2296 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2297 		dwc->gadget.ep0->maxpacket = 8;
2298 		dwc->gadget.speed = USB_SPEED_LOW;
2299 		break;
2300 	}
2301 
2302 	/* Enable USB2 LPM Capability */
2303 
2304 	if ((dwc->revision > DWC3_REVISION_194A)
2305 			&& (speed != DWC3_DCFG_SUPERSPEED)) {
2306 		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2307 		reg |= DWC3_DCFG_LPM_CAP;
2308 		dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2309 
2310 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2311 		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2312 
2313 		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2314 
2315 		/*
2316 		 * When the dwc3 revision is >= 2.40a, the LPM Erratum is
2317 		 * enabled and DCFG.LPMCap is set, the core responds with an
2318 		 * ACK if the BESL value in the LPM token is less than or
2319 		 * equal to the LPM NYET threshold.
2320 		 */
2321 		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2322 				&& dwc->has_lpm_erratum,
2323 				"LPM Erratum not available on dwc3 revisions < 2.40a\n");
2324 
2325 		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2326 			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2327 
2328 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2329 	} else {
2330 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2331 		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2332 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2333 	}
2334 
2335 	dep = dwc->eps[0];
2336 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2337 			false);
2338 	if (ret) {
2339 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2340 		return;
2341 	}
2342 
2343 	dep = dwc->eps[1];
2344 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2345 			false);
2346 	if (ret) {
2347 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2348 		return;
2349 	}
2350 
2351 	/*
2352 	 * Configure PHY via GUSB3PIPECTLn if required.
2353 	 *
2354 	 * Update GTXFIFOSIZn
2355 	 *
2356 	 * In both cases reset values should be sufficient.
2357 	 */
2358 }
2359 
2360 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2361 {
2362 	/*
2363 	 * TODO take core out of low power mode when that's
2364 	 * implemented.
2365 	 */
2366 
2367 	dwc3_resume_gadget(dwc);
2368 }
2369 
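/*
 * dwc3_gadget_linksts_change_interrupt - handle Link State Change events
 *
 * Filters the spurious U3 -> Resume event generated by pre-2.50a cores
 * configured without hibernation, applies the first half of the pre-1.83a
 * U1/U2 -> U0 workaround, and suspends or resumes the gadget driver as the
 * link enters or leaves the low power states.
 */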
2370 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2371 		unsigned int evtinfo)
2372 {
2373 	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
2374 	unsigned int		pwropt;
2375 
2376 	/*
2377 	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2378 	 * Hibernation mode enabled which would show up when device detects
2379 	 * host-initiated U3 exit.
2380 	 *
2381 	 * In that case, the device will generate a Link State Change
2382 	 * Interrupt from U3 to RESUME, which is only relevant if
2383 	 * Hibernation is configured in.
2384 	 *
2385 	 * There are no functional changes due to such spurious event and we
2386 	 * just need to ignore it.
2387 	 *
2388 	 * Refers to:
2389 	 *
2390 	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2391 	 * operational mode
2392 	 */
2393 	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2394 	if ((dwc->revision < DWC3_REVISION_250A) &&
2395 			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2396 		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2397 				(next == DWC3_LINK_STATE_RESUME)) {
2398 			dwc3_trace(trace_dwc3_gadget,
2399 					"ignoring transition U3 -> Resume");
2400 			return;
2401 		}
2402 	}
2403 
2404 	/*
2405 	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2406 	 * on the link partner, the USB session might do multiple entries and
2407 	 * exits of low power states before a transfer takes place.
2408 	 *
2409 	 * Due to this problem, we might experience lower throughput. The
2410 	 * suggested workaround is to disable DCTL[12:9] bits if we're
2411 	 * transitioning from U1/U2 to U0 and enable those bits again
2412 	 * after a transfer completes and there are no pending transfers
2413 	 * on any of the enabled endpoints.
2414 	 *
2415 	 * This is the first half of that workaround.
2416 	 *
2417 	 * Refers to:
2418 	 *
2419 	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2420 	 * core send LGO_Ux entering U0
2421 	 */
2422 	if (dwc->revision < DWC3_REVISION_183A) {
2423 		if (next == DWC3_LINK_STATE_U0) {
2424 			u32	u1u2;
2425 			u32	reg;
2426 
2427 			switch (dwc->link_state) {
2428 			case DWC3_LINK_STATE_U1:
2429 			case DWC3_LINK_STATE_U2:
2430 				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2431 				u1u2 = reg & (DWC3_DCTL_INITU2ENA
2432 						| DWC3_DCTL_ACCEPTU2ENA
2433 						| DWC3_DCTL_INITU1ENA
2434 						| DWC3_DCTL_ACCEPTU1ENA);
2435 
2436 				if (!dwc->u1u2)
2437 					dwc->u1u2 = reg & u1u2;
2438 
2439 				reg &= ~u1u2;
2440 
2441 				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2442 				break;
2443 			default:
2444 				/* do nothing */
2445 				break;
2446 			}
2447 		}
2448 	}
2449 
2450 	switch (next) {
2451 	case DWC3_LINK_STATE_U1:
2452 		if (dwc->speed == USB_SPEED_SUPER)
2453 			dwc3_suspend_gadget(dwc);
2454 		break;
2455 	case DWC3_LINK_STATE_U2:
2456 	case DWC3_LINK_STATE_U3:
2457 		dwc3_suspend_gadget(dwc);
2458 		break;
2459 	case DWC3_LINK_STATE_RESUME:
2460 		dwc3_resume_gadget(dwc);
2461 		break;
2462 	default:
2463 		/* do nothing */
2464 		break;
2465 	}
2466 
2467 	dwc->link_state = next;
2468 }
2469 
2470 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2471 		unsigned int evtinfo)
2472 {
2473 	unsigned int is_ss = evtinfo & BIT(4);
2474 
2475 	/*
2476 	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2477 	 * has a known issue which can cause USB CV TD.9.23 to fail
2478 	 * randomly.
2479 	 *
2480 	 * Because of this issue, core could generate bogus hibernation
2481 	 * events which SW needs to ignore.
2482 	 *
2483 	 * Refers to:
2484 	 *
2485 	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2486 	 * Device Fallback from SuperSpeed
2487 	 */
2488 	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2489 		return;
2490 
2491 	/* enter hibernation here */
2492 }
2493 
2494 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2495 		const struct dwc3_event_devt *event)
2496 {
2497 	switch (event->type) {
2498 	case DWC3_DEVICE_EVENT_DISCONNECT:
2499 		dwc3_gadget_disconnect_interrupt(dwc);
2500 		break;
2501 	case DWC3_DEVICE_EVENT_RESET:
2502 		dwc3_gadget_reset_interrupt(dwc);
2503 		break;
2504 	case DWC3_DEVICE_EVENT_CONNECT_DONE:
2505 		dwc3_gadget_conndone_interrupt(dwc);
2506 		break;
2507 	case DWC3_DEVICE_EVENT_WAKEUP:
2508 		dwc3_gadget_wakeup_interrupt(dwc);
2509 		break;
2510 	case DWC3_DEVICE_EVENT_HIBER_REQ:
2511 		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2512 					"unexpected hibernation event\n"))
2513 			break;
2514 
2515 		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2516 		break;
2517 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2518 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2519 		break;
2520 	case DWC3_DEVICE_EVENT_EOPF:
2521 		dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2522 		break;
2523 	case DWC3_DEVICE_EVENT_SOF:
2524 		dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2525 		break;
2526 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2527 		dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2528 		break;
2529 	case DWC3_DEVICE_EVENT_CMD_CMPL:
2530 		dwc3_trace(trace_dwc3_gadget, "Command Complete");
2531 		break;
2532 	case DWC3_DEVICE_EVENT_OVERFLOW:
2533 		dwc3_trace(trace_dwc3_gadget, "Overflow");
2534 		break;
2535 	default:
2536 		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2537 	}
2538 }
2539 
2540 static void dwc3_process_event_entry(struct dwc3 *dwc,
2541 		const union dwc3_event *event)
2542 {
2543 	trace_dwc3_event(event->raw);
2544 
2545 	/* Endpoint IRQ, handle it and return early */
2546 	if (event->type.is_devspec == 0) {
2547 		/* depevt */
2548 		dwc3_endpoint_interrupt(dwc, &event->depevt);
		return;
2549 	}
2550 
2551 	switch (event->type.type) {
2552 	case DWC3_EVENT_TYPE_DEV:
2553 		dwc3_gadget_interrupt(dwc, &event->devt);
2554 		break;
2555 	/* REVISIT what to do with Carkit and I2C events ? */
2556 	default:
2557 		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2558 	}
2559 }
2560 
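/*
 * dwc3_process_event_buf - drain one event buffer from the threaded handler
 *
 * Consumes the events latched by the top half 4 bytes at a time, writing
 * the handled amount back to GEVNTCOUNT as it goes, then clears the
 * PENDING flag and unmasks the interrupt for this event buffer. Returns
 * IRQ_NONE if the buffer was not flagged as pending.
 */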
2561 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2562 {
2563 	struct dwc3_event_buffer *evt;
2564 	irqreturn_t ret = IRQ_NONE;
2565 	int left;
2566 	u32 reg;
2567 
2568 	evt = dwc->ev_buffs[buf];
2569 	left = evt->count;
2570 
2571 	if (!(evt->flags & DWC3_EVENT_PENDING))
2572 		return IRQ_NONE;
2573 
2574 	while (left > 0) {
2575 		union dwc3_event event;
2576 
2577 		event.raw = *(u32 *) (evt->buf + evt->lpos);
2578 
2579 		dwc3_process_event_entry(dwc, &event);
2580 
2581 		/*
2582 		 * FIXME we wrap around to the next entry correctly
2583 		 * because almost all entries are 4 bytes in size. There
2584 		 * is one entry type which is 12 bytes: a regular entry
2585 		 * followed by 8 bytes of data. It is not yet clear how
2586 		 * such an entry is laid out when it lands next to the
2587 		 * buffer boundary, so that case will need attention
2588 		 * once we try to handle it.
2589 		 */
2590 		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2591 		left -= 4;
2592 
2593 		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2594 	}
2595 
2596 	evt->count = 0;
2597 	evt->flags &= ~DWC3_EVENT_PENDING;
2598 	ret = IRQ_HANDLED;
2599 
2600 	/* Unmask interrupt */
2601 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2602 	reg &= ~DWC3_GEVNTSIZ_INTMASK;
2603 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2604 
2605 	return ret;
2606 }
2607 
2608 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2609 {
2610 	struct dwc3 *dwc = _dwc;
2611 	unsigned long flags;
2612 	irqreturn_t ret = IRQ_NONE;
2613 	int i;
2614 
2615 	spin_lock_irqsave(&dwc->lock, flags);
2616 
2617 	for (i = 0; i < dwc->num_event_buffers; i++)
2618 		ret |= dwc3_process_event_buf(dwc, i);
2619 
2620 	spin_unlock_irqrestore(&dwc->lock, flags);
2621 
2622 	return ret;
2623 }
2624 
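/*
 * dwc3_check_event_buf - top half check for pending events
 *
 * Reads GEVNTCOUNT for the given buffer; if events are pending it latches
 * the count, masks the interrupt for that buffer and asks for the threaded
 * handler to be woken, otherwise it reports IRQ_NONE.
 */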
2625 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2626 {
2627 	struct dwc3_event_buffer *evt;
2628 	u32 count;
2629 	u32 reg;
2630 
2631 	evt = dwc->ev_buffs[buf];
2632 
2633 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2634 	count &= DWC3_GEVNTCOUNT_MASK;
2635 	if (!count)
2636 		return IRQ_NONE;
2637 
2638 	evt->count = count;
2639 	evt->flags |= DWC3_EVENT_PENDING;
2640 
2641 	/* Mask interrupt */
2642 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2643 	reg |= DWC3_GEVNTSIZ_INTMASK;
2644 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2645 
2646 	return IRQ_WAKE_THREAD;
2647 }
2648 
2649 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2650 {
2651 	struct dwc3			*dwc = _dwc;
2652 	int				i;
2653 	irqreturn_t			ret = IRQ_NONE;
2654 
2655 	spin_lock(&dwc->lock);
2656 
2657 	for (i = 0; i < dwc->num_event_buffers; i++) {
2658 		irqreturn_t status;
2659 
2660 		status = dwc3_check_event_buf(dwc, i);
2661 		if (status == IRQ_WAKE_THREAD)
2662 			ret = status;
2663 	}
2664 
2665 	spin_unlock(&dwc->lock);
2666 
2667 	return ret;
2668 }
2669 
2670 /**
2671  * dwc3_gadget_init - Initializes gadget related registers
2672  * @dwc: pointer to our controller context structure
2673  *
2674  * Returns 0 on success otherwise negative errno.
2675  */
2676 int dwc3_gadget_init(struct dwc3 *dwc)
2677 {
2678 	int					ret;
2679 
2680 	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2681 			&dwc->ctrl_req_addr, GFP_KERNEL);
2682 	if (!dwc->ctrl_req) {
2683 		dev_err(dwc->dev, "failed to allocate ctrl request\n");
2684 		ret = -ENOMEM;
2685 		goto err0;
2686 	}
2687 
2688 	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2689 			&dwc->ep0_trb_addr, GFP_KERNEL);
2690 	if (!dwc->ep0_trb) {
2691 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2692 		ret = -ENOMEM;
2693 		goto err1;
2694 	}
2695 
2696 	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2697 	if (!dwc->setup_buf) {
2698 		ret = -ENOMEM;
2699 		goto err2;
2700 	}
2701 
2702 	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2703 			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2704 			GFP_KERNEL);
2705 	if (!dwc->ep0_bounce) {
2706 		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2707 		ret = -ENOMEM;
2708 		goto err3;
2709 	}
2710 
2711 	dwc->gadget.ops			= &dwc3_gadget_ops;
2712 	dwc->gadget.max_speed		= USB_SPEED_SUPER;
2713 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
2714 	dwc->gadget.sg_supported	= true;
2715 	dwc->gadget.name		= "dwc3-gadget";
2716 
2717 	/*
2718 	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2719 	 * on ep out.
2720 	 */
2721 	dwc->gadget.quirk_ep_out_aligned_size = true;
2722 
2723 	/*
2724 	 * REVISIT: Here we should clear all pending IRQs to be
2725 	 * sure we're starting from a well known location.
2726 	 */
2727 
2728 	ret = dwc3_gadget_init_endpoints(dwc);
2729 	if (ret)
2730 		goto err4;
2731 
2732 	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2733 	if (ret) {
2734 		dev_err(dwc->dev, "failed to register udc\n");
2735 		goto err4;
2736 	}
2737 
2738 	return 0;
2739 
2740 err4:
2741 	dwc3_gadget_free_endpoints(dwc);
2742 	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2743 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
2744 
2745 err3:
2746 	kfree(dwc->setup_buf);
2747 
2748 err2:
2749 	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2750 			dwc->ep0_trb, dwc->ep0_trb_addr);
2751 
2752 err1:
2753 	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2754 			dwc->ctrl_req, dwc->ctrl_req_addr);
2755 
2756 err0:
2757 	return ret;
2758 }
2759 
2760 /* -------------------------------------------------------------------------- */
2761 
2762 void dwc3_gadget_exit(struct dwc3 *dwc)
2763 {
2764 	usb_del_gadget_udc(&dwc->gadget);
2765 
2766 	dwc3_gadget_free_endpoints(dwc);
2767 
2768 	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2769 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
2770 
2771 	kfree(dwc->setup_buf);
2772 
2773 	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2774 			dwc->ep0_trb, dwc->ep0_trb_addr);
2775 
2776 	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2777 			dwc->ctrl_req, dwc->ctrl_req_addr);
2778 }
2779 
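/*
 * dwc3_gadget_suspend - quiesce the gadget for a core suspend
 *
 * When pullups are connected, gadget interrupts are disabled and the
 * run/stop state is updated for suspend. Both physical endpoints backing
 * ep0 are disabled and DCFG is saved so it can be restored on resume.
 */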
2780 int dwc3_gadget_suspend(struct dwc3 *dwc)
2781 {
2782 	if (dwc->pullups_connected) {
2783 		dwc3_gadget_disable_irq(dwc);
2784 		dwc3_gadget_run_stop(dwc, true, true);
2785 	}
2786 
2787 	__dwc3_gadget_ep_disable(dwc->eps[0]);
2788 	__dwc3_gadget_ep_disable(dwc->eps[1]);
2789 
2790 	dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2791 
2792 	return 0;
2793 }
2794 
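/*
 * dwc3_gadget_resume - bring the gadget back after a core resume
 *
 * Re-enables both physical endpoints backing ep0 (starting from the
 * SuperSpeed default descriptor), restarts the SETUP phase, restores the
 * saved DCFG and, when pullups are connected, re-enables interrupts and
 * updates the run/stop state again.
 */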
2795 int dwc3_gadget_resume(struct dwc3 *dwc)
2796 {
2797 	struct dwc3_ep		*dep;
2798 	int			ret;
2799 
2800 	/* Start with SuperSpeed Default */
2801 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2802 
2803 	dep = dwc->eps[0];
2804 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2805 			false);
2806 	if (ret)
2807 		goto err0;
2808 
2809 	dep = dwc->eps[1];
2810 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2811 			false);
2812 	if (ret)
2813 		goto err1;
2814 
2815 	/* begin to receive SETUP packets */
2816 	dwc->ep0state = EP0_SETUP_PHASE;
2817 	dwc3_ep0_out_start(dwc);
2818 
2819 	dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2820 
2821 	if (dwc->pullups_connected) {
2822 		dwc3_gadget_enable_irq(dwc);
2823 		dwc3_gadget_run_stop(dwc, true, false);
2824 	}
2825 
2826 	return 0;
2827 
2828 err1:
2829 	__dwc3_gadget_ep_disable(dwc->eps[0]);
2830 
2831 err0:
2832 	return ret;
2833 }
2834