xref: /linux/drivers/usb/cdns3/cdns3-gadget.c (revision d3616e09baa5b3fe9b45270c60ef693879e215ff)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Cadence USBSS DRD Driver - gadget side.
4   *
5   * Copyright (C) 2018-2019 Cadence Design Systems.
6   * Copyright (C) 2017-2018 NXP
7   *
8   * Authors: Pawel Jez <pjez@cadence.com>,
9   *          Pawel Laszczak <pawell@cadence.com>
10   *          Peter Chen <peter.chen@nxp.com>
11   */
12  
13  /*
14   * Work around 1:
15   * At some situations, the controller may get stale data address in TRB
16   * at below sequences:
17   * 1. Controller read TRB includes data address
18   * 2. Software updates TRBs includes data address and Cycle bit
19   * 3. Controller read TRB which includes Cycle bit
20   * 4. DMA run with stale data address
21   *
22   * To fix this problem, driver needs to make the first TRB in TD as invalid.
23   * After preparing all TRBs driver needs to check the position of DMA and
24   * if the DMA point to the first just added TRB and doorbell is 1,
25   * then driver must defer making this TRB as valid. This TRB will be make
26   * as valid during adding next TRB only if DMA is stopped or at TRBERR
27   * interrupt.
28   *
29   * Issue has been fixed in DEV_VER_V3 version of controller.
30   *
31   * Work around 2:
32   * Controller for OUT endpoints has shared on-chip buffers for all incoming
33   * packets, including ep0out. It's FIFO buffer, so packets must be handle by DMA
34   * in correct order. If the first packet in the buffer will not be handled,
35   * then the following packets directed for other endpoints and  functions
36   * will be blocked.
37   * Additionally the packets directed to one endpoint can block entire on-chip
38   * buffers. In this case transfer to other endpoints also will blocked.
39   *
40   * To resolve this issue after raising the descriptor missing interrupt
41   * driver prepares internal usb_request object and use it to arm DMA transfer.
42   *
43   * The problematic situation was observed in case when endpoint has been enabled
44   * but no usb_request were queued. Driver try detects such endpoints and will
45   * use this workaround only for these endpoint.
46   *
47   * Driver use limited number of buffer. This number can be set by macro
48   * CDNS3_WA2_NUM_BUFFERS.
49   *
50   * Such blocking situation was observed on ACM gadget. For this function
51   * host send OUT data packet but ACM function is not prepared for this packet.
52   * It's cause that buffer placed in on chip memory block transfer to other
53   * endpoints.
54   *
55   * Issue has been fixed in DEV_VER_V2 version of controller.
56   *
57   */
58  
59  #include <linux/dma-mapping.h>
60  #include <linux/usb/gadget.h>
61  #include <linux/module.h>
62  #include <linux/dmapool.h>
63  #include <linux/iopoll.h>
64  
65  #include "core.h"
66  #include "gadget-export.h"
67  #include "cdns3-gadget.h"
68  #include "cdns3-trace.h"
69  #include "drd.h"
70  
71  static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
72  				   struct usb_request *request,
73  				   gfp_t gfp_flags);
74  
75  static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
76  				 struct usb_request *request);
77  
78  static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
79  					struct usb_request *request);
80  
81  /**
82   * cdns3_clear_register_bit - clear bit in given register.
83   * @ptr: address of device controller register to be read and changed
84   * @mask: bits requested to clar
85   */
86  static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
87  {
88  	mask = readl(ptr) & ~mask;
89  	writel(mask, ptr);
90  }
91  
92  /**
93   * cdns3_set_register_bit - set bit in given register.
94   * @ptr: address of device controller register to be read and changed
95   * @mask: bits requested to set
96   */
97  void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
98  {
99  	mask = readl(ptr) | mask;
100  	writel(mask, ptr);
101  }
102  
103  /**
104   * cdns3_ep_addr_to_index - Macro converts endpoint address to
105   * index of endpoint object in cdns3_device.eps[] container
106   * @ep_addr: endpoint address for which endpoint object is required
107   *
108   */
109  u8 cdns3_ep_addr_to_index(u8 ep_addr)
110  {
111  	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
112  }
113  
114  static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
115  			     struct cdns3_endpoint *priv_ep)
116  {
117  	int dma_index;
118  
119  	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;
120  
121  	return dma_index / TRB_SIZE;
122  }
123  
124  /**
125   * cdns3_next_request - returns next request from list
126   * @list: list containing requests
127   *
128   * Returns request or NULL if no requests in list
129   */
130  struct usb_request *cdns3_next_request(struct list_head *list)
131  {
132  	return list_first_entry_or_null(list, struct usb_request, list);
133  }
134  
135  /**
136   * cdns3_next_align_buf - returns next buffer from list
137   * @list: list containing buffers
138   *
139   * Returns buffer or NULL if no buffers in list
140   */
141  static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
142  {
143  	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
144  }
145  
146  /**
147   * cdns3_next_priv_request - returns next request from list
148   * @list: list containing requests
149   *
150   * Returns request or NULL if no requests in list
151   */
152  static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
153  {
154  	return list_first_entry_or_null(list, struct cdns3_request, list);
155  }
156  
157  /**
158   * cdns3_select_ep - selects endpoint
159   * @priv_dev:  extended gadget object
160   * @ep: endpoint address
161   */
162  void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
163  {
164  	if (priv_dev->selected_ep == ep)
165  		return;
166  
167  	priv_dev->selected_ep = ep;
168  	writel(ep, &priv_dev->regs->ep_sel);
169  }
170  
171  /**
172   * cdns3_get_tdl - gets current tdl for selected endpoint.
173   * @priv_dev:  extended gadget object
174   *
175   * Before calling this function the appropriate endpoint must
176   * be selected by means of cdns3_select_ep function.
177   */
178  static int cdns3_get_tdl(struct cdns3_device *priv_dev)
179  {
180  	if (priv_dev->dev_ver < DEV_VER_V3)
181  		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
182  	else
183  		return readl(&priv_dev->regs->ep_tdl);
184  }
185  
186  dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
187  				 struct cdns3_trb *trb)
188  {
189  	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;
190  
191  	return priv_ep->trb_pool_dma + offset;
192  }
193  
194  static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
195  {
196  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
197  
198  	if (priv_ep->trb_pool) {
199  		dma_pool_free(priv_dev->eps_dma_pool,
200  			      priv_ep->trb_pool, priv_ep->trb_pool_dma);
201  		priv_ep->trb_pool = NULL;
202  	}
203  }
204  
205  /**
206   * cdns3_allocate_trb_pool - Allocates TRB's pool for selected endpoint
207   * @priv_ep:  endpoint object
208   *
209   * Function will return 0 on success or -ENOMEM on allocation error
210   */
211  int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
212  {
213  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
214  	int ring_size = TRB_RING_SIZE;
215  	int num_trbs = ring_size / TRB_SIZE;
216  	struct cdns3_trb *link_trb;
217  
218  	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
219  		cdns3_free_trb_pool(priv_ep);
220  
221  	if (!priv_ep->trb_pool) {
222  		priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
223  						   GFP_DMA32 | GFP_ATOMIC,
224  						   &priv_ep->trb_pool_dma);
225  
226  		if (!priv_ep->trb_pool)
227  			return -ENOMEM;
228  
229  		priv_ep->alloc_ring_size = ring_size;
230  	}
231  
232  	memset(priv_ep->trb_pool, 0, ring_size);
233  
234  	priv_ep->num_trbs = num_trbs;
235  
236  	if (!priv_ep->num)
237  		return 0;
238  
239  	/* Initialize the last TRB as Link TRB */
240  	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
241  
242  	if (priv_ep->use_streams) {
243  		/*
244  		 * For stream capable endpoints driver use single correct TRB.
245  		 * The last trb has zeroed cycle bit
246  		 */
247  		link_trb->control = 0;
248  	} else {
249  		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
250  		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
251  	}
252  	return 0;
253  }
254  
255  /**
256   * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
257   * @priv_ep: endpoint object
258   *
259   * Endpoint must be selected before call to this function
260   */
261  static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
262  {
263  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
264  	int val;
265  
266  	trace_cdns3_halt(priv_ep, 1, 1);
267  
268  	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
269  	       &priv_dev->regs->ep_cmd);
270  
271  	/* wait for DFLUSH cleared */
272  	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
273  				  !(val & EP_CMD_DFLUSH), 1, 1000);
274  	priv_ep->flags |= EP_STALLED;
275  	priv_ep->flags &= ~EP_STALL_PENDING;
276  }
277  
278  /**
279   * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
280   * @priv_dev: extended gadget object
281   */
282  void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
283  {
284  	int i;
285  
286  	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);
287  
288  	cdns3_allow_enable_l1(priv_dev, 0);
289  	priv_dev->hw_configured_flag = 0;
290  	priv_dev->onchip_used_size = 0;
291  	priv_dev->out_mem_is_allocated = 0;
292  	priv_dev->wait_for_setup = 0;
293  	priv_dev->using_streams = 0;
294  
295  	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
296  		if (priv_dev->eps[i])
297  			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
298  }
299  
300  /**
301   * cdns3_ep_inc_trb - increment a trb index.
302   * @index: Pointer to the TRB index to increment.
303   * @cs: Cycle state
304   * @trb_in_seg: number of TRBs in segment
305   *
306   * The index should never point to the link TRB. After incrementing,
307   * if it is point to the link TRB, wrap around to the beginning and revert
308   * cycle state bit The
309   * link TRB is always at the last TRB entry.
310   */
311  static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
312  {
313  	(*index)++;
314  	if (*index == (trb_in_seg - 1)) {
315  		*index = 0;
316  		*cs ^=  1;
317  	}
318  }
319  
320  /**
321   * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
322   * @priv_ep: The endpoint whose enqueue pointer we're incrementing
323   */
324  static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
325  {
326  	priv_ep->free_trbs--;
327  	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
328  }
329  
330  /**
331   * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
332   * @priv_ep: The endpoint whose dequeue pointer we're incrementing
333   */
334  static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
335  {
336  	priv_ep->free_trbs++;
337  	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
338  }
339  
340  /**
341   * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
342   * @priv_dev: Extended gadget object
343   * @enable: Enable/disable permit to transition to L1.
344   *
345   * If bit USB_CONF_L1EN is set and device receive Extended Token packet,
346   * then controller answer with ACK handshake.
347   * If bit USB_CONF_L1DS is set and device receive Extended Token packet,
348   * then controller answer with NYET handshake.
349   */
350  void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
351  {
352  	if (enable)
353  		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
354  	else
355  		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
356  }
357  
358  enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
359  {
360  	u32 reg;
361  
362  	reg = readl(&priv_dev->regs->usb_sts);
363  
364  	if (DEV_SUPERSPEED(reg))
365  		return USB_SPEED_SUPER;
366  	else if (DEV_HIGHSPEED(reg))
367  		return USB_SPEED_HIGH;
368  	else if (DEV_FULLSPEED(reg))
369  		return USB_SPEED_FULL;
370  	else if (DEV_LOWSPEED(reg))
371  		return USB_SPEED_LOW;
372  	return USB_SPEED_UNKNOWN;
373  }
374  
375  /**
376   * cdns3_start_all_request - add to ring all request not started
377   * @priv_dev: Extended gadget object
378   * @priv_ep: The endpoint for whom request will be started.
379   *
380   * Returns return ENOMEM if transfer ring i not enough TRBs to start
381   *         all requests.
382   */
383  static int cdns3_start_all_request(struct cdns3_device *priv_dev,
384  				   struct cdns3_endpoint *priv_ep)
385  {
386  	struct usb_request *request;
387  	int ret = 0;
388  	u8 pending_empty = list_empty(&priv_ep->pending_req_list);
389  
390  	/*
391  	 * If the last pending transfer is INTERNAL
392  	 * OR streams are enabled for this endpoint
393  	 * do NOT start new transfer till the last one is pending
394  	 */
395  	if (!pending_empty) {
396  		struct cdns3_request *priv_req;
397  
398  		request = cdns3_next_request(&priv_ep->pending_req_list);
399  		priv_req = to_cdns3_request(request);
400  		if ((priv_req->flags & REQUEST_INTERNAL) ||
401  		    (priv_ep->flags & EP_TDLCHK_EN) ||
402  			priv_ep->use_streams) {
403  			dev_dbg(priv_dev->dev, "Blocking external request\n");
404  			return ret;
405  		}
406  	}
407  
408  	while (!list_empty(&priv_ep->deferred_req_list)) {
409  		request = cdns3_next_request(&priv_ep->deferred_req_list);
410  
411  		if (!priv_ep->use_streams) {
412  			ret = cdns3_ep_run_transfer(priv_ep, request);
413  		} else {
414  			priv_ep->stream_sg_idx = 0;
415  			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
416  		}
417  		if (ret)
418  			return ret;
419  
420  		list_move_tail(&request->list, &priv_ep->pending_req_list);
421  		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
422  			break;
423  	}
424  
425  	priv_ep->flags &= ~EP_RING_FULL;
426  	return ret;
427  }
428  
429  /*
430   * WA2: Set flag for all not ISOC OUT endpoints. If this flag is set
431   * driver try to detect whether endpoint need additional internal
432   * buffer for unblocking on-chip FIFO buffer. This flag will be cleared
433   * if before first DESCMISS interrupt the DMA will be armed.
434   */
435  #define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
436  	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
437  		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
438  		(reg) |= EP_STS_EN_DESCMISEN; \
439  	} } while (0)
440  
441  static void __cdns3_descmiss_copy_data(struct usb_request *request,
442  	struct usb_request *descmiss_req)
443  {
444  	int length = request->actual + descmiss_req->actual;
445  	struct scatterlist *s = request->sg;
446  
447  	if (!s) {
448  		if (length <= request->length) {
449  			memcpy(&((u8 *)request->buf)[request->actual],
450  			       descmiss_req->buf,
451  			       descmiss_req->actual);
452  			request->actual = length;
453  		} else {
454  			/* It should never occures */
455  			request->status = -ENOMEM;
456  		}
457  	} else {
458  		if (length <= sg_dma_len(s)) {
459  			void *p = phys_to_virt(sg_dma_address(s));
460  
461  			memcpy(&((u8 *)p)[request->actual],
462  				descmiss_req->buf,
463  				descmiss_req->actual);
464  			request->actual = length;
465  		} else {
466  			request->status = -ENOMEM;
467  		}
468  	}
469  }
470  
471  /**
472   * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
473   * request queued by class driver.
474   * @priv_ep: extended endpoint object
475   * @request: request object
476   */
477  static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
478  					 struct usb_request *request)
479  {
480  	struct usb_request *descmiss_req;
481  	struct cdns3_request *descmiss_priv_req;
482  
483  	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
484  		int chunk_end;
485  
486  		descmiss_priv_req =
487  			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
488  		descmiss_req = &descmiss_priv_req->request;
489  
490  		/* driver can't touch pending request */
491  		if (descmiss_priv_req->flags & REQUEST_PENDING)
492  			break;
493  
494  		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
495  		request->status = descmiss_req->status;
496  		__cdns3_descmiss_copy_data(request, descmiss_req);
497  		list_del_init(&descmiss_priv_req->list);
498  		kfree(descmiss_req->buf);
499  		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
500  		--priv_ep->wa2_counter;
501  
502  		if (!chunk_end)
503  			break;
504  	}
505  }
506  
507  static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
508  						     struct cdns3_endpoint *priv_ep,
509  						     struct cdns3_request *priv_req)
510  {
511  	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
512  	    priv_req->flags & REQUEST_INTERNAL) {
513  		struct usb_request *req;
514  
515  		req = cdns3_next_request(&priv_ep->deferred_req_list);
516  
517  		priv_ep->descmis_req = NULL;
518  
519  		if (!req)
520  			return NULL;
521  
522  		/* unmap the gadget request before copying data */
523  		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
524  						priv_ep->dir);
525  
526  		cdns3_wa2_descmiss_copy_data(priv_ep, req);
527  		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
528  		    req->length != req->actual) {
529  			/* wait for next part of transfer */
530  			/* re-map the gadget request buffer*/
531  			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
532  				usb_endpoint_dir_in(priv_ep->endpoint.desc));
533  			return NULL;
534  		}
535  
536  		if (req->status == -EINPROGRESS)
537  			req->status = 0;
538  
539  		list_del_init(&req->list);
540  		cdns3_start_all_request(priv_dev, priv_ep);
541  		return req;
542  	}
543  
544  	return &priv_req->request;
545  }
546  
547  static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
548  				     struct cdns3_endpoint *priv_ep,
549  				     struct cdns3_request *priv_req)
550  {
551  	int deferred = 0;
552  
553  	/*
554  	 * If transfer was queued before DESCMISS appear than we
555  	 * can disable handling of DESCMISS interrupt. Driver assumes that it
556  	 * can disable special treatment for this endpoint.
557  	 */
558  	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
559  		u32 reg;
560  
561  		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
562  		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
563  		reg = readl(&priv_dev->regs->ep_sts_en);
564  		reg &= ~EP_STS_EN_DESCMISEN;
565  		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
566  		writel(reg, &priv_dev->regs->ep_sts_en);
567  	}
568  
569  	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
570  		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
571  		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);
572  
573  		/*
574  		 *  DESCMISS transfer has been finished, so data will be
575  		 *  directly copied from internal allocated usb_request
576  		 *  objects.
577  		 */
578  		if (pending_empty && !descmiss_empty &&
579  		    !(priv_req->flags & REQUEST_INTERNAL)) {
580  			cdns3_wa2_descmiss_copy_data(priv_ep,
581  						     &priv_req->request);
582  
583  			trace_cdns3_wa2(priv_ep, "get internal stored data");
584  
585  			list_add_tail(&priv_req->request.list,
586  				      &priv_ep->pending_req_list);
587  			cdns3_gadget_giveback(priv_ep, priv_req,
588  					      priv_req->request.status);
589  
590  			/*
591  			 * Intentionally driver returns positive value as
592  			 * correct value. It informs that transfer has
593  			 * been finished.
594  			 */
595  			return EINPROGRESS;
596  		}
597  
598  		/*
599  		 * Driver will wait for completion DESCMISS transfer,
600  		 * before starts new, not DESCMISS transfer.
601  		 */
602  		if (!pending_empty && !descmiss_empty) {
603  			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
604  			deferred = 1;
605  		}
606  
607  		if (priv_req->flags & REQUEST_INTERNAL)
608  			list_add_tail(&priv_req->list,
609  				      &priv_ep->wa2_descmiss_req_list);
610  	}
611  
612  	return deferred;
613  }
614  
615  static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
616  {
617  	struct cdns3_request *priv_req;
618  
619  	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
620  		u8 chain;
621  
622  		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
623  		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);
624  
625  		trace_cdns3_wa2(priv_ep, "removes eldest request");
626  
627  		kfree(priv_req->request.buf);
628  		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
629  					     &priv_req->request);
630  		list_del_init(&priv_req->list);
631  		--priv_ep->wa2_counter;
632  
633  		if (!chain)
634  			break;
635  	}
636  }
637  
638  /**
639   * cdns3_wa2_descmissing_packet - handles descriptor missing event.
640   * @priv_ep: extended gadget object
641   *
642   * This function is used only for WA2. For more information see Work around 2
643   * description.
644   */
645  static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
646  {
647  	struct cdns3_request *priv_req;
648  	struct usb_request *request;
649  	u8 pending_empty = list_empty(&priv_ep->pending_req_list);
650  
651  	/* check for pending transfer */
652  	if (!pending_empty) {
653  		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
654  		return;
655  	}
656  
657  	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
658  		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
659  		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
660  	}
661  
662  	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");
663  
664  	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
665  		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
666  		cdns3_wa2_remove_old_request(priv_ep);
667  	}
668  
669  	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
670  						GFP_ATOMIC);
671  	if (!request)
672  		goto err;
673  
674  	priv_req = to_cdns3_request(request);
675  	priv_req->flags |= REQUEST_INTERNAL;
676  
677  	/* if this field is still assigned it indicate that transfer related
678  	 * with this request has not been finished yet. Driver in this
679  	 * case simply allocate next request and assign flag REQUEST_INTERNAL_CH
680  	 * flag to previous one. It will indicate that current request is
681  	 * part of the previous one.
682  	 */
683  	if (priv_ep->descmis_req)
684  		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;
685  
686  	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
687  					GFP_ATOMIC);
688  	priv_ep->wa2_counter++;
689  
690  	if (!priv_req->request.buf) {
691  		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
692  		goto err;
693  	}
694  
695  	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
696  	priv_ep->descmis_req = priv_req;
697  
698  	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
699  				&priv_ep->descmis_req->request,
700  				GFP_ATOMIC);
701  
702  	return;
703  
704  err:
705  	dev_err(priv_ep->cdns3_dev->dev,
706  		"Failed: No sufficient memory for DESCMIS\n");
707  }
708  
709  static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
710  {
711  	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
712  
713  	if (tdl) {
714  		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;
715  
716  		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
717  		       &priv_dev->regs->ep_cmd);
718  	}
719  }
720  
721  static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
722  {
723  	u32 ep_sts_reg;
724  
725  	/* select EP0-out */
726  	cdns3_select_ep(priv_dev, 0);
727  
728  	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
729  
730  	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
731  		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
732  		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];
733  
734  		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
735  		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
736  			u8 pending_empty = list_empty(&outq_ep->pending_req_list);
737  
738  			if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
739  			    (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
740  			    !pending_empty) {
741  			} else {
742  				u32 ep_sts_en_reg;
743  				u32 ep_cmd_reg;
744  
745  				cdns3_select_ep(priv_dev, outq_ep->num |
746  						outq_ep->dir);
747  				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
748  				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);
749  
750  				outq_ep->flags |= EP_TDLCHK_EN;
751  				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
752  						       EP_CFG_TDL_CHK);
753  
754  				cdns3_wa2_enable_detection(priv_dev, outq_ep,
755  							   ep_sts_en_reg);
756  				writel(ep_sts_en_reg,
757  				       &priv_dev->regs->ep_sts_en);
758  				/* reset tdl value to zero */
759  				cdns3_wa2_reset_tdl(priv_dev);
760  				/*
761  				 * Memory barrier - Reset tdl before ringing the
762  				 * doorbell.
763  				 */
764  				wmb();
765  				if (EP_CMD_DRDY & ep_cmd_reg) {
766  					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");
767  
768  				} else {
769  					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
770  					/*
771  					 * ring doorbell to generate DESCMIS irq
772  					 */
773  					writel(EP_CMD_DRDY,
774  					       &priv_dev->regs->ep_cmd);
775  				}
776  			}
777  		}
778  	}
779  }
780  
781  /**
782   * cdns3_gadget_giveback - call struct usb_request's ->complete callback
783   * @priv_ep: The endpoint to whom the request belongs to
784   * @priv_req: The request we're giving back
785   * @status: completion code for the request
786   *
787   * Must be called with controller's lock held and interrupts disabled. This
788   * function will unmap @req and call its ->complete() callback to notify upper
789   * layers that it has completed.
790   */
791  void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
792  			   struct cdns3_request *priv_req,
793  			   int status)
794  {
795  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
796  	struct usb_request *request = &priv_req->request;
797  
798  	list_del_init(&request->list);
799  
800  	if (request->status == -EINPROGRESS)
801  		request->status = status;
802  
803  	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
804  					priv_ep->dir);
805  
806  	if ((priv_req->flags & REQUEST_UNALIGNED) &&
807  	    priv_ep->dir == USB_DIR_OUT && !request->status) {
808  		/* Make DMA buffer CPU accessible */
809  		dma_sync_single_for_cpu(priv_dev->sysdev,
810  			priv_req->aligned_buf->dma,
811  			priv_req->aligned_buf->size,
812  			priv_req->aligned_buf->dir);
813  		memcpy(request->buf, priv_req->aligned_buf->buf,
814  		       request->length);
815  	}
816  
817  	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
818  	/* All TRBs have finished, clear the counter */
819  	priv_req->finished_trb = 0;
820  	trace_cdns3_gadget_giveback(priv_req);
821  
822  	if (priv_dev->dev_ver < DEV_VER_V2) {
823  		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
824  						    priv_req);
825  		if (!request)
826  			return;
827  	}
828  
829  	if (request->complete) {
830  		spin_unlock(&priv_dev->lock);
831  		usb_gadget_giveback_request(&priv_ep->endpoint,
832  					    request);
833  		spin_lock(&priv_dev->lock);
834  	}
835  
836  	if (request->buf == priv_dev->zlp_buf)
837  		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
838  }
839  
840  static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
841  {
842  	/* Work around for stale data address in TRB*/
843  	if (priv_ep->wa1_set) {
844  		trace_cdns3_wa1(priv_ep, "restore cycle bit");
845  
846  		priv_ep->wa1_set = 0;
847  		priv_ep->wa1_trb_index = 0xFFFF;
848  		if (priv_ep->wa1_cycle_bit) {
849  			priv_ep->wa1_trb->control =
850  				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
851  		} else {
852  			priv_ep->wa1_trb->control =
853  				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
854  		}
855  	}
856  }
857  
858  static void cdns3_free_aligned_request_buf(struct work_struct *work)
859  {
860  	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
861  					aligned_buf_wq);
862  	struct cdns3_aligned_buf *buf, *tmp;
863  	unsigned long flags;
864  
865  	spin_lock_irqsave(&priv_dev->lock, flags);
866  
867  	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
868  		if (!buf->in_use) {
869  			list_del(&buf->list);
870  
871  			/*
872  			 * Re-enable interrupts to free DMA capable memory.
873  			 * Driver can't free this memory with disabled
874  			 * interrupts.
875  			 */
876  			spin_unlock_irqrestore(&priv_dev->lock, flags);
877  			dma_free_noncoherent(priv_dev->sysdev, buf->size,
878  					  buf->buf, buf->dma, buf->dir);
879  			kfree(buf);
880  			spin_lock_irqsave(&priv_dev->lock, flags);
881  		}
882  	}
883  
884  	spin_unlock_irqrestore(&priv_dev->lock, flags);
885  }
886  
887  static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
888  {
889  	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
890  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
891  	struct cdns3_aligned_buf *buf;
892  
893  	/* check if buffer is aligned to 8. */
894  	if (!((uintptr_t)priv_req->request.buf & 0x7))
895  		return 0;
896  
897  	buf = priv_req->aligned_buf;
898  
899  	if (!buf || priv_req->request.length > buf->size) {
900  		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
901  		if (!buf)
902  			return -ENOMEM;
903  
904  		buf->size = priv_req->request.length;
905  		buf->dir = usb_endpoint_dir_in(priv_ep->endpoint.desc) ?
906  			DMA_TO_DEVICE : DMA_FROM_DEVICE;
907  
908  		buf->buf = dma_alloc_noncoherent(priv_dev->sysdev,
909  					      buf->size,
910  					      &buf->dma,
911  					      buf->dir,
912  					      GFP_ATOMIC);
913  		if (!buf->buf) {
914  			kfree(buf);
915  			return -ENOMEM;
916  		}
917  
918  		if (priv_req->aligned_buf) {
919  			trace_cdns3_free_aligned_request(priv_req);
920  			priv_req->aligned_buf->in_use = 0;
921  			queue_work(system_freezable_wq,
922  				   &priv_dev->aligned_buf_wq);
923  		}
924  
925  		buf->in_use = 1;
926  		priv_req->aligned_buf = buf;
927  
928  		list_add_tail(&buf->list,
929  			      &priv_dev->aligned_buf_list);
930  	}
931  
932  	if (priv_ep->dir == USB_DIR_IN) {
933  		/* Make DMA buffer CPU accessible */
934  		dma_sync_single_for_cpu(priv_dev->sysdev,
935  			buf->dma, buf->size, buf->dir);
936  		memcpy(buf->buf, priv_req->request.buf,
937  		       priv_req->request.length);
938  	}
939  
940  	/* Transfer DMA buffer ownership back to device */
941  	dma_sync_single_for_device(priv_dev->sysdev,
942  			buf->dma, buf->size, buf->dir);
943  
944  	priv_req->flags |= REQUEST_UNALIGNED;
945  	trace_cdns3_prepare_aligned_request(priv_req);
946  
947  	return 0;
948  }
949  
950  static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
951  				  struct cdns3_trb *trb)
952  {
953  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
954  
955  	if (!priv_ep->wa1_set) {
956  		u32 doorbell;
957  
958  		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
959  
960  		if (doorbell) {
961  			priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
962  			priv_ep->wa1_set = 1;
963  			priv_ep->wa1_trb = trb;
964  			priv_ep->wa1_trb_index = priv_ep->enqueue;
965  			trace_cdns3_wa1(priv_ep, "set guard");
966  			return 0;
967  		}
968  	}
969  	return 1;
970  }
971  
972  static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
973  					     struct cdns3_endpoint *priv_ep)
974  {
975  	int dma_index;
976  	u32 doorbell;
977  
978  	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
979  	dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
980  
981  	if (!doorbell || dma_index != priv_ep->wa1_trb_index)
982  		cdns3_wa1_restore_cycle_bit(priv_ep);
983  }
984  
985  static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
986  					struct usb_request *request)
987  {
988  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
989  	struct cdns3_request *priv_req;
990  	struct cdns3_trb *trb;
991  	dma_addr_t trb_dma;
992  	int address;
993  	u32 control;
994  	u32 length;
995  	u32 tdl;
996  	unsigned int sg_idx = priv_ep->stream_sg_idx;
997  
998  	priv_req = to_cdns3_request(request);
999  	address = priv_ep->endpoint.desc->bEndpointAddress;
1000  
1001  	priv_ep->flags |= EP_PENDING_REQUEST;
1002  
1003  	/* must allocate buffer aligned to 8 */
1004  	if (priv_req->flags & REQUEST_UNALIGNED)
1005  		trb_dma = priv_req->aligned_buf->dma;
1006  	else
1007  		trb_dma = request->dma;
1008  
1009  	/*  For stream capable endpoints driver use only single TD. */
1010  	trb = priv_ep->trb_pool + priv_ep->enqueue;
1011  	priv_req->start_trb = priv_ep->enqueue;
1012  	priv_req->end_trb = priv_req->start_trb;
1013  	priv_req->trb = trb;
1014  
1015  	cdns3_select_ep(priv_ep->cdns3_dev, address);
1016  
1017  	control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
1018  		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;
1019  
1020  	if (!request->num_sgs) {
1021  		trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
1022  		length = request->length;
1023  	} else {
1024  		trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
1025  		length = request->sg[sg_idx].length;
1026  	}
1027  
1028  	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);
1029  
1030  	trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));
1031  
1032  	/*
1033  	 * For DEV_VER_V2 controller version we have enabled
1034  	 * USB_CONF2_EN_TDL_TRB in DMULT configuration.
1035  	 * This enables TDL calculation based on TRB, hence setting TDL in TRB.
1036  	 */
1037  	if (priv_dev->dev_ver >= DEV_VER_V2) {
1038  		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
1039  			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
1040  	}
1041  	priv_req->flags |= REQUEST_PENDING;
1042  
1043  	trb->control = cpu_to_le32(control);
1044  
1045  	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
1046  
1047  	/*
1048  	 * Memory barrier - Cycle Bit must be set before trb->length  and
1049  	 * trb->buffer fields.
1050  	 */
1051  	wmb();
1052  
1053  	/* always first element */
1054  	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
1055  	       &priv_dev->regs->ep_traddr);
1056  
1057  	if (!(priv_ep->flags & EP_STALLED)) {
1058  		trace_cdns3_ring(priv_ep);
1059  		/*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/
1060  		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
1061  
1062  		priv_ep->prime_flag = false;
1063  
1064  		/*
1065  		 * Controller version DEV_VER_V2 tdl calculation
1066  		 * is based on TRB
1067  		 */
1068  
1069  		if (priv_dev->dev_ver < DEV_VER_V2)
1070  			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
1071  			       &priv_dev->regs->ep_cmd);
1072  		else if (priv_dev->dev_ver > DEV_VER_V2)
1073  			writel(tdl, &priv_dev->regs->ep_tdl);
1074  
1075  		priv_ep->last_stream_id = priv_req->request.stream_id;
1076  		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
1077  		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
1078  		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
1079  
1080  		trace_cdns3_doorbell_epx(priv_ep->name,
1081  					 readl(&priv_dev->regs->ep_traddr));
1082  	}
1083  
1084  	/* WORKAROUND for transition to L0 */
1085  	__cdns3_gadget_wakeup(priv_dev);
1086  
1087  	return 0;
1088  }
1089  
1090  static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
1091  {
1092  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1093  
1094  	if (priv_dev->dev_ver < DEV_VER_V3)
1095  		return;
1096  
1097  	if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
1098  		writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
1099  		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
1100  	}
1101  }
1102  
1103  /**
1104   * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
1105   * @priv_ep: endpoint object
1106   * @request: request object
1107   *
1108   * Returns zero on success or negative value on failure
1109   */
1110  static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
1111  				 struct usb_request *request)
1112  {
1113  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1114  	struct cdns3_request *priv_req;
1115  	struct cdns3_trb *trb;
1116  	struct cdns3_trb *link_trb = NULL;
1117  	dma_addr_t trb_dma;
1118  	u32 togle_pcs = 1;
1119  	int sg_iter = 0;
1120  	int num_trb;
1121  	int address;
1122  	u32 control;
1123  	int pcs;
1124  	u16 total_tdl = 0;
1125  	struct scatterlist *s = NULL;
1126  	bool sg_supported = !!(request->num_mapped_sgs);
1127  
1128  	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
1129  		num_trb = priv_ep->interval;
1130  	else
1131  		num_trb = sg_supported ? request->num_mapped_sgs : 1;
1132  
1133  	if (num_trb > priv_ep->free_trbs) {
1134  		priv_ep->flags |= EP_RING_FULL;
1135  		return -ENOBUFS;
1136  	}
1137  
1138  	priv_req = to_cdns3_request(request);
1139  	address = priv_ep->endpoint.desc->bEndpointAddress;
1140  
1141  	priv_ep->flags |= EP_PENDING_REQUEST;
1142  
1143  	/* must allocate buffer aligned to 8 */
1144  	if (priv_req->flags & REQUEST_UNALIGNED)
1145  		trb_dma = priv_req->aligned_buf->dma;
1146  	else
1147  		trb_dma = request->dma;
1148  
1149  	trb = priv_ep->trb_pool + priv_ep->enqueue;
1150  	priv_req->start_trb = priv_ep->enqueue;
1151  	priv_req->trb = trb;
1152  
1153  	cdns3_select_ep(priv_ep->cdns3_dev, address);
1154  
1155  	/* prepare ring */
1156  	if ((priv_ep->enqueue + num_trb)  >= (priv_ep->num_trbs - 1)) {
1157  		int doorbell, dma_index;
1158  		u32 ch_bit = 0;
1159  
1160  		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
1161  		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
1162  
1163  		/* Driver can't update LINK TRB if it is current processed. */
1164  		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
1165  			priv_ep->flags |= EP_DEFERRED_DRDY;
1166  			return -ENOBUFS;
1167  		}
1168  
1169  		/*updating C bt in  Link TRB before starting DMA*/
1170  		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
1171  		/*
1172  		 * For TRs size equal 2 enabling TRB_CHAIN for epXin causes
1173  		 * that DMA stuck at the LINK TRB.
1174  		 * On the other hand, removing TRB_CHAIN for longer TRs for
1175  		 * epXout cause that DMA stuck after handling LINK TRB.
1176  		 * To eliminate this strange behavioral driver set TRB_CHAIN
1177  		 * bit only for TR size > 2.
1178  		 */
1179  		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
1180  		    TRBS_PER_SEGMENT > 2)
1181  			ch_bit = TRB_CHAIN;
1182  
1183  		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
1184  				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
1185  	}
1186  
1187  	if (priv_dev->dev_ver <= DEV_VER_V2)
1188  		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
1189  
1190  	if (sg_supported)
1191  		s = request->sg;
1192  
1193  	/* set incorrect Cycle Bit for first trb*/
1194  	control = priv_ep->pcs ? 0 : TRB_CYCLE;
1195  	trb->length = 0;
1196  	if (priv_dev->dev_ver >= DEV_VER_V2) {
1197  		u16 td_size;
1198  
1199  		td_size = DIV_ROUND_UP(request->length,
1200  				       priv_ep->endpoint.maxpacket);
1201  		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
1202  			trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
1203  		else
1204  			control |= TRB_TDL_HS_SIZE(td_size);
1205  	}
1206  
1207  	do {
1208  		u32 length;
1209  
1210  		/* fill TRB */
1211  		control |= TRB_TYPE(TRB_NORMAL);
1212  		if (sg_supported) {
1213  			trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
1214  			length = sg_dma_len(s);
1215  		} else {
1216  			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
1217  			length = request->length;
1218  		}
1219  
1220  		if (priv_ep->flags & EP_TDLCHK_EN)
1221  			total_tdl += DIV_ROUND_UP(length,
1222  					       priv_ep->endpoint.maxpacket);
1223  
1224  		trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
1225  					TRB_LEN(length));
1226  		pcs = priv_ep->pcs ? TRB_CYCLE : 0;
1227  
1228  		/*
1229  		 * first trb should be prepared as last to avoid processing
1230  		 *  transfer to early
1231  		 */
1232  		if (sg_iter != 0)
1233  			control |= pcs;
1234  
1235  		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir) {
1236  			control |= TRB_IOC | TRB_ISP;
1237  		} else {
1238  			/* for last element in TD or in SG list */
1239  			if (sg_iter == (num_trb - 1) && sg_iter != 0)
1240  				control |= pcs | TRB_IOC | TRB_ISP;
1241  		}
1242  
1243  		if (sg_iter)
1244  			trb->control = cpu_to_le32(control);
1245  		else
1246  			priv_req->trb->control = cpu_to_le32(control);
1247  
1248  		if (sg_supported) {
1249  			trb->control |= cpu_to_le32(TRB_ISP);
1250  			/* Don't set chain bit for last TRB */
1251  			if (sg_iter < num_trb - 1)
1252  				trb->control |= cpu_to_le32(TRB_CHAIN);
1253  
1254  			s = sg_next(s);
1255  		}
1256  
1257  		control = 0;
1258  		++sg_iter;
1259  		priv_req->end_trb = priv_ep->enqueue;
1260  		cdns3_ep_inc_enq(priv_ep);
1261  		trb = priv_ep->trb_pool + priv_ep->enqueue;
1262  		trb->length = 0;
1263  	} while (sg_iter < num_trb);
1264  
1265  	trb = priv_req->trb;
1266  
1267  	priv_req->flags |= REQUEST_PENDING;
1268  	priv_req->num_of_trb = num_trb;
1269  
1270  	if (sg_iter == 1)
1271  		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);
1272  
1273  	if (priv_dev->dev_ver < DEV_VER_V2 &&
1274  	    (priv_ep->flags & EP_TDLCHK_EN)) {
1275  		u16 tdl = total_tdl;
1276  		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
1277  
1278  		if (tdl > EP_CMD_TDL_MAX) {
1279  			tdl = EP_CMD_TDL_MAX;
1280  			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
1281  		}
1282  
1283  		if (old_tdl < tdl) {
1284  			tdl -= old_tdl;
1285  			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
1286  			       &priv_dev->regs->ep_cmd);
1287  		}
1288  	}
1289  
1290  	/*
1291  	 * Memory barrier - cycle bit must be set before other filds in trb.
1292  	 */
1293  	wmb();
1294  
1295  	/* give the TD to the consumer*/
1296  	if (togle_pcs)
1297  		trb->control = trb->control ^ cpu_to_le32(1);
1298  
1299  	if (priv_dev->dev_ver <= DEV_VER_V2)
1300  		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
1301  
1302  	if (num_trb > 1) {
1303  		int i = 0;
1304  
1305  		while (i < num_trb) {
1306  			trace_cdns3_prepare_trb(priv_ep, trb + i);
1307  			if (trb + i == link_trb) {
1308  				trb = priv_ep->trb_pool;
1309  				num_trb = num_trb - i;
1310  				i = 0;
1311  			} else {
1312  				i++;
1313  			}
1314  		}
1315  	} else {
1316  		trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
1317  	}
1318  
1319  	/*
1320  	 * Memory barrier - Cycle Bit must be set before trb->length  and
1321  	 * trb->buffer fields.
1322  	 */
1323  	wmb();
1324  
1325  	/*
1326  	 * For DMULT mode we can set address to transfer ring only once after
1327  	 * enabling endpoint.
1328  	 */
1329  	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
1330  		/*
1331  		 * Until SW is not ready to handle the OUT transfer the ISO OUT
1332  		 * Endpoint should be disabled (EP_CFG.ENABLE = 0).
1333  		 * EP_CFG_ENABLE must be set before updating ep_traddr.
1334  		 */
1335  		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir &&
1336  		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
1337  			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
1338  			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
1339  					       EP_CFG_ENABLE);
1340  		}
1341  
1342  		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
1343  					priv_req->start_trb * TRB_SIZE),
1344  					&priv_dev->regs->ep_traddr);
1345  
1346  		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
1347  	}
1348  
1349  	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
1350  		trace_cdns3_ring(priv_ep);
1351  		/*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/
1352  		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
1353  		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
1354  		cdns3_rearm_drdy_if_needed(priv_ep);
1355  		trace_cdns3_doorbell_epx(priv_ep->name,
1356  					 readl(&priv_dev->regs->ep_traddr));
1357  	}
1358  
1359  	/* WORKAROUND for transition to L0 */
1360  	__cdns3_gadget_wakeup(priv_dev);
1361  
1362  	return 0;
1363  }
1364  
1365  void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
1366  {
1367  	struct cdns3_endpoint *priv_ep;
1368  	struct usb_ep *ep;
1369  
1370  	if (priv_dev->hw_configured_flag)
1371  		return;
1372  
1373  	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);
1374  
1375  	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
1376  			       USB_CONF_U1EN | USB_CONF_U2EN);
1377  
1378  	priv_dev->hw_configured_flag = 1;
1379  
1380  	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
1381  		if (ep->enabled) {
1382  			priv_ep = ep_to_cdns3_ep(ep);
1383  			cdns3_start_all_request(priv_dev, priv_ep);
1384  		}
1385  	}
1386  
1387  	cdns3_allow_enable_l1(priv_dev, 1);
1388  }
1389  
1390  /**
1391   * cdns3_trb_handled - check whether trb has been handled by DMA
1392   *
1393   * @priv_ep: extended endpoint object.
1394   * @priv_req: request object for checking
1395   *
1396   * Endpoint must be selected before invoking this function.
1397   *
1398   * Returns false if request has not been handled by DMA, else returns true.
1399   *
1400   * SR - start ring
1401   * ER -  end ring
1402   * DQ = priv_ep->dequeue - dequeue position
1403   * EQ = priv_ep->enqueue -  enqueue position
1404   * ST = priv_req->start_trb - index of first TRB in transfer ring
1405   * ET = priv_req->end_trb - index of last TRB in transfer ring
1406   * CI = current_index - index of processed TRB by DMA.
1407   *
1408   * As first step, we check if the TRB between the ST and ET.
1409   * Then, we check if cycle bit for index priv_ep->dequeue
1410   * is correct.
1411   *
1412   * some rules:
1413   * 1. priv_ep->dequeue never equals to current_index.
1414   * 2  priv_ep->enqueue never exceed priv_ep->dequeue
1415   * 3. exception: priv_ep->enqueue == priv_ep->dequeue
1416   *    and priv_ep->free_trbs is zero.
1417   *    This case indicate that TR is full.
1418   *
1419   * At below two cases, the request have been handled.
1420   * Case 1 - priv_ep->dequeue < current_index
1421   *      SR ... EQ ... DQ ... CI ... ER
1422   *      SR ... DQ ... CI ... EQ ... ER
1423   *
1424   * Case 2 - priv_ep->dequeue > current_index
1425   * This situation takes place when CI go through the LINK TRB at the end of
1426   * transfer ring.
1427   *      SR ... CI ... EQ ... DQ ... ER
1428   */
1429  static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
1430  				  struct cdns3_request *priv_req)
1431  {
1432  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1433  	struct cdns3_trb *trb;
1434  	int current_index = 0;
1435  	int handled = 0;
1436  	int doorbell;
1437  
1438  	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
1439  	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
1440  
1441  	/* current trb doesn't belong to this request */
1442  	if (priv_req->start_trb < priv_req->end_trb) {
1443  		if (priv_ep->dequeue > priv_req->end_trb)
1444  			goto finish;
1445  
1446  		if (priv_ep->dequeue < priv_req->start_trb)
1447  			goto finish;
1448  	}
1449  
1450  	if ((priv_req->start_trb > priv_req->end_trb) &&
1451  		(priv_ep->dequeue > priv_req->end_trb) &&
1452  		(priv_ep->dequeue < priv_req->start_trb))
1453  		goto finish;
1454  
1455  	if ((priv_req->start_trb == priv_req->end_trb) &&
1456  		(priv_ep->dequeue != priv_req->end_trb))
1457  		goto finish;
1458  
1459  	trb = &priv_ep->trb_pool[priv_ep->dequeue];
1460  
1461  	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
1462  		goto finish;
1463  
1464  	if (doorbell == 1 && current_index == priv_ep->dequeue)
1465  		goto finish;
1466  
1467  	/* The corner case for TRBS_PER_SEGMENT equal 2). */
1468  	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
1469  		handled = 1;
1470  		goto finish;
1471  	}
1472  
1473  	if (priv_ep->enqueue == priv_ep->dequeue &&
1474  	    priv_ep->free_trbs == 0) {
1475  		handled = 1;
1476  	} else if (priv_ep->dequeue < current_index) {
1477  		if ((current_index == (priv_ep->num_trbs - 1)) &&
1478  		    !priv_ep->dequeue)
1479  			goto finish;
1480  
1481  		handled = 1;
1482  	} else if (priv_ep->dequeue  > current_index) {
1483  			handled = 1;
1484  	}
1485  
1486  finish:
1487  	trace_cdns3_request_handled(priv_req, current_index, handled);
1488  
1489  	return handled;
1490  }
1491  
1492  static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
1493  				     struct cdns3_endpoint *priv_ep)
1494  {
1495  	struct cdns3_request *priv_req;
1496  	struct usb_request *request;
1497  	struct cdns3_trb *trb;
1498  	bool request_handled = false;
1499  	bool transfer_end = false;
1500  
1501  	while (!list_empty(&priv_ep->pending_req_list)) {
1502  		request = cdns3_next_request(&priv_ep->pending_req_list);
1503  		priv_req = to_cdns3_request(request);
1504  
1505  		trb = priv_ep->trb_pool + priv_ep->dequeue;
1506  
1507  		/* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
1508  		while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
1509  			trace_cdns3_complete_trb(priv_ep, trb);
1510  			cdns3_ep_inc_deq(priv_ep);
1511  			trb = priv_ep->trb_pool + priv_ep->dequeue;
1512  		}
1513  
1514  		if (!request->stream_id) {
1515  			/* Re-select endpoint. It could be changed by other CPU
1516  			 * during handling usb_gadget_giveback_request.
1517  			 */
1518  			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
1519  
1520  			while (cdns3_trb_handled(priv_ep, priv_req)) {
1521  				priv_req->finished_trb++;
1522  				if (priv_req->finished_trb >= priv_req->num_of_trb)
1523  					request_handled = true;
1524  
1525  				trb = priv_ep->trb_pool + priv_ep->dequeue;
1526  				trace_cdns3_complete_trb(priv_ep, trb);
1527  
1528  				if (!transfer_end)
1529  					request->actual +=
1530  						TRB_LEN(le32_to_cpu(trb->length));
1531  
1532  				if (priv_req->num_of_trb > 1 &&
1533  					le32_to_cpu(trb->control) & TRB_SMM)
1534  					transfer_end = true;
1535  
1536  				cdns3_ep_inc_deq(priv_ep);
1537  			}
1538  
1539  			if (request_handled) {
1540  				cdns3_gadget_giveback(priv_ep, priv_req, 0);
1541  				request_handled = false;
1542  				transfer_end = false;
1543  			} else {
1544  				goto prepare_next_td;
1545  			}
1546  
1547  			if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
1548  			    TRBS_PER_SEGMENT == 2)
1549  				break;
1550  		} else {
1551  			/* Re-select endpoint. It could be changed by other CPU
1552  			 * during handling usb_gadget_giveback_request.
1553  			 */
1554  			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
1555  
1556  			trb = priv_ep->trb_pool;
1557  			trace_cdns3_complete_trb(priv_ep, trb);
1558  
1559  			if (trb != priv_req->trb)
1560  				dev_warn(priv_dev->dev,
1561  					 "request_trb=0x%p, queue_trb=0x%p\n",
1562  					 priv_req->trb, trb);
1563  
1564  			request->actual += TRB_LEN(le32_to_cpu(trb->length));
1565  
1566  			if (!request->num_sgs ||
1567  			    (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
1568  				priv_ep->stream_sg_idx = 0;
1569  				cdns3_gadget_giveback(priv_ep, priv_req, 0);
1570  			} else {
1571  				priv_ep->stream_sg_idx++;
1572  				cdns3_ep_run_stream_transfer(priv_ep, request);
1573  			}
1574  			break;
1575  		}
1576  	}
1577  	priv_ep->flags &= ~EP_PENDING_REQUEST;
1578  
1579  prepare_next_td:
1580  	if (!(priv_ep->flags & EP_STALLED) &&
1581  	    !(priv_ep->flags & EP_STALL_PENDING))
1582  		cdns3_start_all_request(priv_dev, priv_ep);
1583  }
1584  
1585  void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
1586  {
1587  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1588  
1589  	cdns3_wa1_restore_cycle_bit(priv_ep);
1590  
1591  	if (rearm) {
1592  		trace_cdns3_ring(priv_ep);
1593  
1594  		/* Cycle Bit must be updated before arming DMA. */
1595  		wmb();
1596  		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
1597  
1598  		__cdns3_gadget_wakeup(priv_dev);
1599  
1600  		trace_cdns3_doorbell_epx(priv_ep->name,
1601  					 readl(&priv_dev->regs->ep_traddr));
1602  	}
1603  }
1604  
1605  static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
1606  {
1607  	u16 tdl = priv_ep->pending_tdl;
1608  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1609  
1610  	if (tdl > EP_CMD_TDL_MAX) {
1611  		tdl = EP_CMD_TDL_MAX;
1612  		priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
1613  	} else {
1614  		priv_ep->pending_tdl = 0;
1615  	}
1616  
1617  	writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
1618  }
1619  
1620  /**
1621   * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
1622   * @priv_ep: endpoint object
1623   *
1624   * Returns 0
1625   */
1626  static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
1627  {
1628  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1629  	u32 ep_sts_reg;
1630  	struct usb_request *deferred_request;
1631  	struct usb_request *pending_request;
1632  	u32 tdl = 0;
1633  
1634  	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
1635  
1636  	trace_cdns3_epx_irq(priv_dev, priv_ep);
1637  
1638  	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
1639  	writel(ep_sts_reg, &priv_dev->regs->ep_sts);
1640  
1641  	if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
1642  		bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);
1643  
1644  		tdl = cdns3_get_tdl(priv_dev);
1645  
1646  		/*
1647  		 * Continue the previous transfer:
1648  		 * There is some racing between ERDY and PRIME. The device send
1649  		 * ERDY and almost in the same time Host send PRIME. It cause
1650  		 * that host ignore the ERDY packet and driver has to send it
1651  		 * again.
1652  		 */
1653  		if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
1654  		    EP_STS_HOSTPP(ep_sts_reg))) {
1655  			writel(EP_CMD_ERDY |
1656  			       EP_CMD_ERDY_SID(priv_ep->last_stream_id),
1657  			       &priv_dev->regs->ep_cmd);
1658  			ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
1659  		} else {
1660  			priv_ep->prime_flag = true;
1661  
1662  			pending_request = cdns3_next_request(&priv_ep->pending_req_list);
1663  			deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);
1664  
1665  			if (deferred_request && !pending_request) {
1666  				cdns3_start_all_request(priv_dev, priv_ep);
1667  			}
1668  		}
1669  	}
1670  
1671  	if (ep_sts_reg & EP_STS_TRBERR) {
1672  		if (priv_ep->flags & EP_STALL_PENDING &&
1673  		    !(ep_sts_reg & EP_STS_DESCMIS &&
1674  		    priv_dev->dev_ver < DEV_VER_V2)) {
1675  			cdns3_ep_stall_flush(priv_ep);
1676  		}
1677  
1678  		/*
1679  		 * For isochronous transfer driver completes request on
1680  		 * IOC or on TRBERR. IOC appears only when device receive
1681  		 * OUT data packet. If host disable stream or lost some packet
1682  		 * then the only way to finish all queued transfer is to do it
1683  		 * on TRBERR event.
1684  		 */
1685  		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
1686  		    !priv_ep->wa1_set) {
1687  			if (!priv_ep->dir) {
1688  				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);
1689  
1690  				ep_cfg &= ~EP_CFG_ENABLE;
1691  				writel(ep_cfg, &priv_dev->regs->ep_cfg);
1692  				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
1693  			}
1694  			cdns3_transfer_completed(priv_dev, priv_ep);
1695  		} else if (!(priv_ep->flags & EP_STALLED) &&
1696  			  !(priv_ep->flags & EP_STALL_PENDING)) {
1697  			if (priv_ep->flags & EP_DEFERRED_DRDY) {
1698  				priv_ep->flags &= ~EP_DEFERRED_DRDY;
1699  				cdns3_start_all_request(priv_dev, priv_ep);
1700  			} else {
1701  				cdns3_rearm_transfer(priv_ep,
1702  						     priv_ep->wa1_set);
1703  			}
1704  		}
1705  	}
1706  
1707  	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
1708  	    (ep_sts_reg & EP_STS_IOT)) {
1709  		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
1710  			if (ep_sts_reg & EP_STS_ISP)
1711  				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
1712  			else
1713  				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
1714  		}
1715  
1716  		if (!priv_ep->use_streams) {
1717  			if ((ep_sts_reg & EP_STS_IOC) ||
1718  			    (ep_sts_reg & EP_STS_ISP)) {
1719  				cdns3_transfer_completed(priv_dev, priv_ep);
1720  			} else if ((priv_ep->flags & EP_TDLCHK_EN) &
1721  				   priv_ep->pending_tdl) {
1722  				/* handle IOT with pending tdl */
1723  				cdns3_reprogram_tdl(priv_ep);
1724  			}
1725  		} else if (priv_ep->dir == USB_DIR_OUT) {
1726  			priv_ep->ep_sts_pending |= ep_sts_reg;
1727  		} else if (ep_sts_reg & EP_STS_IOT) {
1728  			cdns3_transfer_completed(priv_dev, priv_ep);
1729  		}
1730  	}
1731  
1732  	/*
1733  	 * MD_EXIT interrupt sets when stream capable endpoint exits
1734  	 * from MOVE DATA state of Bulk IN/OUT stream protocol state machine
1735  	 */
1736  	if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
1737  	    (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
1738  		priv_ep->ep_sts_pending = 0;
1739  		cdns3_transfer_completed(priv_dev, priv_ep);
1740  	}
1741  
1742  	/*
1743  	 * WA2: this condition should only be meet when
1744  	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
1745  	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
1746  	 * In other cases this interrupt will be disabled.
1747  	 */
1748  	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
1749  	    !(priv_ep->flags & EP_STALLED))
1750  		cdns3_wa2_descmissing_packet(priv_ep);
1751  
1752  	return 0;
1753  }
1754  
1755  static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
1756  {
1757  	if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
1758  		priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
1759  }
1760  
1761  /**
1762   * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
1763   * @priv_dev: extended gadget object
1764   * @usb_ists: bitmap representation of device's reported interrupts
1765   * (usb_ists register value)
1766   */
1767  static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
1768  					      u32 usb_ists)
1769  __must_hold(&priv_dev->lock)
1770  {
1771  	int speed = 0;
1772  
1773  	trace_cdns3_usb_irq(priv_dev, usb_ists);
1774  	if (usb_ists & USB_ISTS_L1ENTI) {
1775  		/*
1776  		 * WORKAROUND: CDNS3 controller has issue with hardware resuming
1777  		 * from L1. To fix it, if any DMA transfer is pending driver
1778  		 * must starts driving resume signal immediately.
1779  		 */
1780  		if (readl(&priv_dev->regs->drbl))
1781  			__cdns3_gadget_wakeup(priv_dev);
1782  	}
1783  
1784  	/* Connection detected */
1785  	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
1786  		speed = cdns3_get_speed(priv_dev);
1787  		priv_dev->gadget.speed = speed;
1788  		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
1789  		cdns3_ep0_config(priv_dev);
1790  	}
1791  
1792  	/* Disconnection detected */
1793  	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
1794  		spin_unlock(&priv_dev->lock);
1795  		cdns3_disconnect_gadget(priv_dev);
1796  		spin_lock(&priv_dev->lock);
1797  		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
1798  		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
1799  		cdns3_hw_reset_eps_config(priv_dev);
1800  	}
1801  
1802  	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
1803  		if (priv_dev->gadget_driver &&
1804  		    priv_dev->gadget_driver->suspend) {
1805  			spin_unlock(&priv_dev->lock);
1806  			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
1807  			spin_lock(&priv_dev->lock);
1808  		}
1809  	}
1810  
1811  	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
1812  		if (priv_dev->gadget_driver &&
1813  		    priv_dev->gadget_driver->resume) {
1814  			spin_unlock(&priv_dev->lock);
1815  			priv_dev->gadget_driver->resume(&priv_dev->gadget);
1816  			spin_lock(&priv_dev->lock);
1817  		}
1818  	}
1819  
1820  	/* reset*/
1821  	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
1822  		if (priv_dev->gadget_driver) {
1823  			spin_unlock(&priv_dev->lock);
1824  			usb_gadget_udc_reset(&priv_dev->gadget,
1825  					     priv_dev->gadget_driver);
1826  			spin_lock(&priv_dev->lock);
1827  
1828  			/*read again to check the actual speed*/
1829  			speed = cdns3_get_speed(priv_dev);
1830  			priv_dev->gadget.speed = speed;
1831  			cdns3_hw_reset_eps_config(priv_dev);
1832  			cdns3_ep0_config(priv_dev);
1833  		}
1834  	}
1835  }
1836  
1837  /**
1838   * cdns3_device_irq_handler - interrupt handler for device part of controller
1839   *
1840   * @irq: irq number for cdns3 core device
1841   * @data: structure of cdns3
1842   *
1843   * Returns IRQ_HANDLED or IRQ_NONE
1844   */
1845  static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
1846  {
1847  	struct cdns3_device *priv_dev = data;
1848  	struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
1849  	irqreturn_t ret = IRQ_NONE;
1850  	u32 reg;
1851  
1852  	if (cdns->in_lpm)
1853  		return ret;
1854  
1855  	/* check USB device interrupt */
1856  	reg = readl(&priv_dev->regs->usb_ists);
1857  	if (reg) {
1858  		/* After masking interrupts the new interrupts won't be
1859  		 * reported in usb_ists/ep_ists. In order to not lose some
1860  		 * of them driver disables only detected interrupts.
1861  		 * They will be enabled ASAP after clearing source of
1862  		 * interrupt. This an unusual behavior only applies to
1863  		 * usb_ists register.
1864  		 */
1865  		reg = ~reg & readl(&priv_dev->regs->usb_ien);
1866  		/* mask deferred interrupt. */
1867  		writel(reg, &priv_dev->regs->usb_ien);
1868  		ret = IRQ_WAKE_THREAD;
1869  	}
1870  
1871  	/* check endpoint interrupt */
1872  	reg = readl(&priv_dev->regs->ep_ists);
1873  	if (reg) {
1874  		writel(0, &priv_dev->regs->ep_ien);
1875  		ret = IRQ_WAKE_THREAD;
1876  	}
1877  
1878  	return ret;
1879  }
1880  
1881  /**
1882   * cdns3_device_thread_irq_handler - interrupt handler for device part
1883   * of controller
1884   *
1885   * @irq: irq number for cdns3 core device
1886   * @data: structure of cdns3
1887   *
1888   * Returns IRQ_HANDLED or IRQ_NONE
1889   */
1890  static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
1891  {
1892  	struct cdns3_device *priv_dev = data;
1893  	irqreturn_t ret = IRQ_NONE;
1894  	unsigned long flags;
1895  	unsigned int bit;
1896  	unsigned long reg;
1897  
1898  	spin_lock_irqsave(&priv_dev->lock, flags);
1899  
1900  	reg = readl(&priv_dev->regs->usb_ists);
1901  	if (reg) {
1902  		writel(reg, &priv_dev->regs->usb_ists);
1903  		writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
1904  		cdns3_check_usb_interrupt_proceed(priv_dev, reg);
1905  		ret = IRQ_HANDLED;
1906  	}
1907  
1908  	reg = readl(&priv_dev->regs->ep_ists);
1909  
1910  	/* handle default endpoint OUT */
1911  	if (reg & EP_ISTS_EP_OUT0) {
1912  		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
1913  		ret = IRQ_HANDLED;
1914  	}
1915  
1916  	/* handle default endpoint IN */
1917  	if (reg & EP_ISTS_EP_IN0) {
1918  		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
1919  		ret = IRQ_HANDLED;
1920  	}
1921  
1922  	/* check if interrupt from non default endpoint, if no exit */
1923  	reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
1924  	if (!reg)
1925  		goto irqend;
1926  
1927  	for_each_set_bit(bit, &reg,
1928  			 sizeof(u32) * BITS_PER_BYTE) {
1929  		cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
1930  		ret = IRQ_HANDLED;
1931  	}
1932  
1933  	if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
1934  		cdns3_wa2_check_outq_status(priv_dev);
1935  
1936  irqend:
1937  	writel(~0, &priv_dev->regs->ep_ien);
1938  	spin_unlock_irqrestore(&priv_dev->lock, flags);
1939  
1940  	return ret;
1941  }
1942  
1943  /**
1944   * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
1945   *
1946   * The real reservation will occur during write to EP_CFG register,
1947   * this function is used to check if the 'size' reservation is allowed.
1948   *
1949   * @priv_dev: extended gadget object
1950   * @size: the size (KB) for EP would like to allocate
1951   * @is_in: endpoint direction
1952   *
1953   * Return 0 if the required size can met or negative value on failure
1954   */
1955  static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
1956  					  int size, int is_in)
1957  {
1958  	int remained;
1959  
1960  	/* 2KB are reserved for EP0*/
1961  	remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
1962  
1963  	if (is_in) {
1964  		if (remained < size)
1965  			return -EPERM;
1966  
1967  		priv_dev->onchip_used_size += size;
1968  	} else {
1969  		int required;
1970  
1971  		/**
1972  		 *  ALL OUT EPs are shared the same chunk onchip memory, so
1973  		 * driver checks if it already has assigned enough buffers
1974  		 */
1975  		if (priv_dev->out_mem_is_allocated >= size)
1976  			return 0;
1977  
1978  		required = size - priv_dev->out_mem_is_allocated;
1979  
1980  		if (required > remained)
1981  			return -EPERM;
1982  
1983  		priv_dev->out_mem_is_allocated += required;
1984  		priv_dev->onchip_used_size += required;
1985  	}
1986  
1987  	return 0;
1988  }
1989  
1990  static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
1991  				  struct cdns3_endpoint *priv_ep)
1992  {
1993  	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
1994  
1995  	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
1996  	if (priv_dev->dev_ver <= DEV_VER_V2)
1997  		writel(USB_CONF_DMULT, &regs->usb_conf);
1998  
1999  	if (priv_dev->dev_ver == DEV_VER_V2)
2000  		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);
2001  
2002  	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
2003  		u32 mask;
2004  
2005  		if (priv_ep->dir)
2006  			mask = BIT(priv_ep->num + 16);
2007  		else
2008  			mask = BIT(priv_ep->num);
2009  
2010  		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir) {
2011  			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2012  			cdns3_set_register_bit(&regs->tdl_beh, mask);
2013  			cdns3_set_register_bit(&regs->tdl_beh2, mask);
2014  			cdns3_set_register_bit(&regs->dma_adv_td, mask);
2015  		}
2016  
2017  		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
2018  			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2019  
2020  		cdns3_set_register_bit(&regs->dtrans, mask);
2021  	}
2022  }
2023  
2024  /**
2025   * cdns3_ep_config - Configure hardware endpoint
2026   * @priv_ep: extended endpoint object
2027   * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
2028   */
2029  int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
2030  {
2031  	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
2032  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2033  	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
2034  	u32 max_packet_size = 0;
2035  	u8 maxburst = 0;
2036  	u32 ep_cfg = 0;
2037  	u8 buffering;
2038  	u8 mult = 0;
2039  	int ret;
2040  
2041  	buffering = CDNS3_EP_BUF_SIZE - 1;
2042  
2043  	cdns3_configure_dmult(priv_dev, priv_ep);
2044  
2045  	switch (priv_ep->type) {
2046  	case USB_ENDPOINT_XFER_INT:
2047  		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
2048  
2049  		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
2050  			ep_cfg |= EP_CFG_TDL_CHK;
2051  		break;
2052  	case USB_ENDPOINT_XFER_BULK:
2053  		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
2054  
2055  		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
2056  			ep_cfg |= EP_CFG_TDL_CHK;
2057  		break;
2058  	default:
2059  		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
2060  		mult = CDNS3_EP_ISO_HS_MULT - 1;
2061  		buffering = mult + 1;
2062  	}
2063  
2064  	switch (priv_dev->gadget.speed) {
2065  	case USB_SPEED_FULL:
2066  		max_packet_size = is_iso_ep ? 1023 : 64;
2067  		break;
2068  	case USB_SPEED_HIGH:
2069  		max_packet_size = is_iso_ep ? 1024 : 512;
2070  		break;
2071  	case USB_SPEED_SUPER:
2072  		/* It's limitation that driver assumes in driver. */
2073  		mult = 0;
2074  		max_packet_size = 1024;
2075  		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
2076  			maxburst = CDNS3_EP_ISO_SS_BURST - 1;
2077  			buffering = (mult + 1) *
2078  				    (maxburst + 1);
2079  
2080  			if (priv_ep->interval > 1)
2081  				buffering++;
2082  		} else {
2083  			maxburst = CDNS3_EP_BUF_SIZE - 1;
2084  		}
2085  		break;
2086  	default:
2087  		/* all other speed are not supported */
2088  		return -EINVAL;
2089  	}
2090  
2091  	if (max_packet_size == 1024)
2092  		priv_ep->trb_burst_size = 128;
2093  	else if (max_packet_size >= 512)
2094  		priv_ep->trb_burst_size = 64;
2095  	else
2096  		priv_ep->trb_burst_size = 16;
2097  
2098  	/* onchip buffer is only allocated before configuration */
2099  	if (!priv_dev->hw_configured_flag) {
2100  		ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
2101  						     !!priv_ep->dir);
2102  		if (ret) {
2103  			dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
2104  			return ret;
2105  		}
2106  	}
2107  
2108  	if (enable)
2109  		ep_cfg |= EP_CFG_ENABLE;
2110  
2111  	if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2112  		if (priv_dev->dev_ver >= DEV_VER_V3) {
2113  			u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
2114  
2115  			/*
2116  			 * Stream capable endpoints are handled by using ep_tdl
2117  			 * register. Other endpoints use TDL from TRB feature.
2118  			 */
2119  			cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
2120  						 mask);
2121  		}
2122  
2123  		/*  Enable Stream Bit TDL chk and SID chk */
2124  		ep_cfg |=  EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
2125  	}
2126  
2127  	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
2128  		  EP_CFG_MULT(mult) |
2129  		  EP_CFG_BUFFERING(buffering) |
2130  		  EP_CFG_MAXBURST(maxburst);
2131  
2132  	cdns3_select_ep(priv_dev, bEndpointAddress);
2133  	writel(ep_cfg, &priv_dev->regs->ep_cfg);
2134  	priv_ep->flags |= EP_CONFIGURED;
2135  
2136  	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
2137  		priv_ep->name, ep_cfg);
2138  
2139  	return 0;
2140  }
2141  
2142  /* Find correct direction for HW endpoint according to description */
2143  static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
2144  				   struct cdns3_endpoint *priv_ep)
2145  {
2146  	return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
2147  	       (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
2148  }
2149  
2150  static struct
2151  cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
2152  					struct usb_endpoint_descriptor *desc)
2153  {
2154  	struct usb_ep *ep;
2155  	struct cdns3_endpoint *priv_ep;
2156  
2157  	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2158  		unsigned long num;
2159  		int ret;
2160  		/* ep name pattern likes epXin or epXout */
2161  		char c[2] = {ep->name[2], '\0'};
2162  
2163  		ret = kstrtoul(c, 10, &num);
2164  		if (ret)
2165  			return ERR_PTR(ret);
2166  
2167  		priv_ep = ep_to_cdns3_ep(ep);
2168  		if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
2169  			if (!(priv_ep->flags & EP_CLAIMED)) {
2170  				priv_ep->num  = num;
2171  				return priv_ep;
2172  			}
2173  		}
2174  	}
2175  
2176  	return ERR_PTR(-ENOENT);
2177  }
2178  
2179  /*
2180   *  Cadence IP has one limitation that all endpoints must be configured
2181   * (Type & MaxPacketSize) before setting configuration through hardware
2182   * register, it means we can't change endpoints configuration after
2183   * set_configuration.
2184   *
2185   * This function set EP_CLAIMED flag which is added when the gadget driver
2186   * uses usb_ep_autoconfig to configure specific endpoint;
2187   * When the udc driver receives set_configurion request,
2188   * it goes through all claimed endpoints, and configure all endpoints
2189   * accordingly.
2190   *
2191   * At usb_ep_ops.enable/disable, we only enable and disable endpoint through
2192   * ep_cfg register which can be changed after set_configuration, and do
2193   * some software operation accordingly.
2194   */
2195  static struct
2196  usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
2197  			      struct usb_endpoint_descriptor *desc,
2198  			      struct usb_ss_ep_comp_descriptor *comp_desc)
2199  {
2200  	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2201  	struct cdns3_endpoint *priv_ep;
2202  	unsigned long flags;
2203  
2204  	priv_ep = cdns3_find_available_ep(priv_dev, desc);
2205  	if (IS_ERR(priv_ep)) {
2206  		dev_err(priv_dev->dev, "no available ep\n");
2207  		return NULL;
2208  	}
2209  
2210  	dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
2211  
2212  	spin_lock_irqsave(&priv_dev->lock, flags);
2213  	priv_ep->endpoint.desc = desc;
2214  	priv_ep->dir  = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
2215  	priv_ep->type = usb_endpoint_type(desc);
2216  	priv_ep->flags |= EP_CLAIMED;
2217  	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
2218  
2219  	spin_unlock_irqrestore(&priv_dev->lock, flags);
2220  	return &priv_ep->endpoint;
2221  }
2222  
2223  /**
2224   * cdns3_gadget_ep_alloc_request - Allocates request
2225   * @ep: endpoint object associated with request
2226   * @gfp_flags: gfp flags
2227   *
2228   * Returns allocated request address, NULL on allocation error
2229   */
2230  struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
2231  						  gfp_t gfp_flags)
2232  {
2233  	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2234  	struct cdns3_request *priv_req;
2235  
2236  	priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
2237  	if (!priv_req)
2238  		return NULL;
2239  
2240  	priv_req->priv_ep = priv_ep;
2241  
2242  	trace_cdns3_alloc_request(priv_req);
2243  	return &priv_req->request;
2244  }
2245  
2246  /**
2247   * cdns3_gadget_ep_free_request - Free memory occupied by request
2248   * @ep: endpoint object associated with request
2249   * @request: request to free memory
2250   */
2251  void cdns3_gadget_ep_free_request(struct usb_ep *ep,
2252  				  struct usb_request *request)
2253  {
2254  	struct cdns3_request *priv_req = to_cdns3_request(request);
2255  
2256  	if (priv_req->aligned_buf)
2257  		priv_req->aligned_buf->in_use = 0;
2258  
2259  	trace_cdns3_free_request(priv_req);
2260  	kfree(priv_req);
2261  }
2262  
2263  /**
2264   * cdns3_gadget_ep_enable - Enable endpoint
2265   * @ep: endpoint object
2266   * @desc: endpoint descriptor
2267   *
2268   * Returns 0 on success, error code elsewhere
2269   */
2270  static int cdns3_gadget_ep_enable(struct usb_ep *ep,
2271  				  const struct usb_endpoint_descriptor *desc)
2272  {
2273  	struct cdns3_endpoint *priv_ep;
2274  	struct cdns3_device *priv_dev;
2275  	const struct usb_ss_ep_comp_descriptor *comp_desc;
2276  	u32 reg = EP_STS_EN_TRBERREN;
2277  	u32 bEndpointAddress;
2278  	unsigned long flags;
2279  	int enable = 1;
2280  	int ret = 0;
2281  	int val;
2282  
2283  	priv_ep = ep_to_cdns3_ep(ep);
2284  	priv_dev = priv_ep->cdns3_dev;
2285  	comp_desc = priv_ep->endpoint.comp_desc;
2286  
2287  	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
2288  		dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
2289  		return -EINVAL;
2290  	}
2291  
2292  	if (!desc->wMaxPacketSize) {
2293  		dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
2294  		return -EINVAL;
2295  	}
2296  
2297  	if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
2298  			  "%s is already enabled\n", priv_ep->name))
2299  		return 0;
2300  
2301  	spin_lock_irqsave(&priv_dev->lock, flags);
2302  
2303  	priv_ep->endpoint.desc = desc;
2304  	priv_ep->type = usb_endpoint_type(desc);
2305  	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
2306  
2307  	if (priv_ep->interval > ISO_MAX_INTERVAL &&
2308  	    priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
2309  		dev_err(priv_dev->dev, "Driver is limited to %d period\n",
2310  			ISO_MAX_INTERVAL);
2311  
2312  		ret =  -EINVAL;
2313  		goto exit;
2314  	}
2315  
2316  	bEndpointAddress = priv_ep->num | priv_ep->dir;
2317  	cdns3_select_ep(priv_dev, bEndpointAddress);
2318  
2319  	/*
2320  	 * For some versions of controller at some point during ISO OUT traffic
2321  	 * DMA reads Transfer Ring for the EP which has never got doorbell.
2322  	 * This issue was detected only on simulation, but to avoid this issue
2323  	 * driver add protection against it. To fix it driver enable ISO OUT
2324  	 * endpoint before setting DRBL. This special treatment of ISO OUT
2325  	 * endpoints are recommended by controller specification.
2326  	 */
2327  	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir)
2328  		enable = 0;
2329  
2330  	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
2331  		/*
2332  		 * Enable stream support (SS mode) related interrupts
2333  		 * in EP_STS_EN Register
2334  		 */
2335  		if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2336  			reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
2337  				EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
2338  				EP_STS_EN_STREAMREN;
2339  			priv_ep->use_streams = true;
2340  			ret = cdns3_ep_config(priv_ep, enable);
2341  			priv_dev->using_streams |= true;
2342  		}
2343  	} else {
2344  		ret = cdns3_ep_config(priv_ep, enable);
2345  	}
2346  
2347  	if (ret)
2348  		goto exit;
2349  
2350  	ret = cdns3_allocate_trb_pool(priv_ep);
2351  	if (ret)
2352  		goto exit;
2353  
2354  	bEndpointAddress = priv_ep->num | priv_ep->dir;
2355  	cdns3_select_ep(priv_dev, bEndpointAddress);
2356  
2357  	trace_cdns3_gadget_ep_enable(priv_ep);
2358  
2359  	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2360  
2361  	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2362  					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2363  					1, 1000);
2364  
2365  	if (unlikely(ret)) {
2366  		cdns3_free_trb_pool(priv_ep);
2367  		ret =  -EINVAL;
2368  		goto exit;
2369  	}
2370  
2371  	/* enable interrupt for selected endpoint */
2372  	cdns3_set_register_bit(&priv_dev->regs->ep_ien,
2373  			       BIT(cdns3_ep_addr_to_index(bEndpointAddress)));
2374  
2375  	if (priv_dev->dev_ver < DEV_VER_V2)
2376  		cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
2377  
2378  	writel(reg, &priv_dev->regs->ep_sts_en);
2379  
2380  	ep->desc = desc;
2381  	priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
2382  			    EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
2383  	priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
2384  	priv_ep->wa1_set = 0;
2385  	priv_ep->enqueue = 0;
2386  	priv_ep->dequeue = 0;
2387  	reg = readl(&priv_dev->regs->ep_sts);
2388  	priv_ep->pcs = !!EP_STS_CCS(reg);
2389  	priv_ep->ccs = !!EP_STS_CCS(reg);
2390  	/* one TRB is reserved for link TRB used in DMULT mode*/
2391  	priv_ep->free_trbs = priv_ep->num_trbs - 1;
2392  exit:
2393  	spin_unlock_irqrestore(&priv_dev->lock, flags);
2394  
2395  	return ret;
2396  }
2397  
2398  /**
2399   * cdns3_gadget_ep_disable - Disable endpoint
2400   * @ep: endpoint object
2401   *
2402   * Returns 0 on success, error code elsewhere
2403   */
2404  static int cdns3_gadget_ep_disable(struct usb_ep *ep)
2405  {
2406  	struct cdns3_endpoint *priv_ep;
2407  	struct cdns3_request *priv_req;
2408  	struct cdns3_device *priv_dev;
2409  	struct usb_request *request;
2410  	unsigned long flags;
2411  	int ret = 0;
2412  	u32 ep_cfg;
2413  	int val;
2414  
2415  	if (!ep) {
2416  		pr_err("usbss: invalid parameters\n");
2417  		return -EINVAL;
2418  	}
2419  
2420  	priv_ep = ep_to_cdns3_ep(ep);
2421  	priv_dev = priv_ep->cdns3_dev;
2422  
2423  	if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
2424  			  "%s is already disabled\n", priv_ep->name))
2425  		return 0;
2426  
2427  	spin_lock_irqsave(&priv_dev->lock, flags);
2428  
2429  	trace_cdns3_gadget_ep_disable(priv_ep);
2430  
2431  	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2432  
2433  	ep_cfg = readl(&priv_dev->regs->ep_cfg);
2434  	ep_cfg &= ~EP_CFG_ENABLE;
2435  	writel(ep_cfg, &priv_dev->regs->ep_cfg);
2436  
2437  	/**
2438  	 * Driver needs some time before resetting endpoint.
2439  	 * It need waits for clearing DBUSY bit or for timeout expired.
2440  	 * 10us is enough time for controller to stop transfer.
2441  	 */
2442  	readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
2443  				  !(val & EP_STS_DBUSY), 1, 10);
2444  	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2445  
2446  	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2447  				  !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2448  				  1, 1000);
2449  	if (unlikely(ret))
2450  		dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
2451  			priv_ep->name);
2452  
2453  	while (!list_empty(&priv_ep->pending_req_list)) {
2454  		request = cdns3_next_request(&priv_ep->pending_req_list);
2455  
2456  		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2457  				      -ESHUTDOWN);
2458  	}
2459  
2460  	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
2461  		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
2462  
2463  		kfree(priv_req->request.buf);
2464  		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
2465  					     &priv_req->request);
2466  		list_del_init(&priv_req->list);
2467  		--priv_ep->wa2_counter;
2468  	}
2469  
2470  	while (!list_empty(&priv_ep->deferred_req_list)) {
2471  		request = cdns3_next_request(&priv_ep->deferred_req_list);
2472  
2473  		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2474  				      -ESHUTDOWN);
2475  	}
2476  
2477  	priv_ep->descmis_req = NULL;
2478  
2479  	ep->desc = NULL;
2480  	priv_ep->flags &= ~EP_ENABLED;
2481  	priv_ep->use_streams = false;
2482  
2483  	spin_unlock_irqrestore(&priv_dev->lock, flags);
2484  
2485  	return ret;
2486  }
2487  
2488  /**
2489   * __cdns3_gadget_ep_queue - Transfer data on endpoint
2490   * @ep: endpoint object
2491   * @request: request object
2492   * @gfp_flags: gfp flags
2493   *
2494   * Returns 0 on success, error code elsewhere
2495   */
2496  static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
2497  				   struct usb_request *request,
2498  				   gfp_t gfp_flags)
2499  {
2500  	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2501  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2502  	struct cdns3_request *priv_req;
2503  	int ret = 0;
2504  
2505  	request->actual = 0;
2506  	request->status = -EINPROGRESS;
2507  	priv_req = to_cdns3_request(request);
2508  	trace_cdns3_ep_queue(priv_req);
2509  
2510  	if (priv_dev->dev_ver < DEV_VER_V2) {
2511  		ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
2512  						priv_req);
2513  
2514  		if (ret == EINPROGRESS)
2515  			return 0;
2516  	}
2517  
2518  	ret = cdns3_prepare_aligned_request_buf(priv_req);
2519  	if (ret < 0)
2520  		return ret;
2521  
2522  	ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
2523  					    usb_endpoint_dir_in(ep->desc));
2524  	if (ret)
2525  		return ret;
2526  
2527  	list_add_tail(&request->list, &priv_ep->deferred_req_list);
2528  
2529  	/*
2530  	 * For stream capable endpoint if prime irq flag is set then only start
2531  	 * request.
2532  	 * If hardware endpoint configuration has not been set yet then
2533  	 * just queue request in deferred list. Transfer will be started in
2534  	 * cdns3_set_hw_configuration.
2535  	 */
2536  	if (!request->stream_id) {
2537  		if (priv_dev->hw_configured_flag &&
2538  		    !(priv_ep->flags & EP_STALLED) &&
2539  		    !(priv_ep->flags & EP_STALL_PENDING))
2540  			cdns3_start_all_request(priv_dev, priv_ep);
2541  	} else {
2542  		if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
2543  			cdns3_start_all_request(priv_dev, priv_ep);
2544  	}
2545  
2546  	return 0;
2547  }
2548  
2549  static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
2550  				 gfp_t gfp_flags)
2551  {
2552  	struct usb_request *zlp_request;
2553  	struct cdns3_endpoint *priv_ep;
2554  	struct cdns3_device *priv_dev;
2555  	unsigned long flags;
2556  	int ret;
2557  
2558  	if (!request || !ep)
2559  		return -EINVAL;
2560  
2561  	priv_ep = ep_to_cdns3_ep(ep);
2562  	priv_dev = priv_ep->cdns3_dev;
2563  
2564  	spin_lock_irqsave(&priv_dev->lock, flags);
2565  
2566  	ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);
2567  
2568  	if (ret == 0 && request->zero && request->length &&
2569  	    (request->length % ep->maxpacket == 0)) {
2570  		struct cdns3_request *priv_req;
2571  
2572  		zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
2573  		zlp_request->buf = priv_dev->zlp_buf;
2574  		zlp_request->length = 0;
2575  
2576  		priv_req = to_cdns3_request(zlp_request);
2577  		priv_req->flags |= REQUEST_ZLP;
2578  
2579  		dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
2580  			priv_ep->name);
2581  		ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
2582  	}
2583  
2584  	spin_unlock_irqrestore(&priv_dev->lock, flags);
2585  	return ret;
2586  }
2587  
2588  /**
2589   * cdns3_gadget_ep_dequeue - Remove request from transfer queue
2590   * @ep: endpoint object associated with request
2591   * @request: request object
2592   *
2593   * Returns 0 on success, error code elsewhere
2594   */
2595  int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
2596  			    struct usb_request *request)
2597  {
2598  	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2599  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2600  	struct usb_request *req, *req_temp;
2601  	struct cdns3_request *priv_req;
2602  	struct cdns3_trb *link_trb;
2603  	u8 req_on_hw_ring = 0;
2604  	unsigned long flags;
2605  	int ret = 0;
2606  
2607  	if (!ep || !request || !ep->desc)
2608  		return -EINVAL;
2609  
2610  	spin_lock_irqsave(&priv_dev->lock, flags);
2611  
2612  	priv_req = to_cdns3_request(request);
2613  
2614  	trace_cdns3_ep_dequeue(priv_req);
2615  
2616  	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2617  
2618  	list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
2619  				 list) {
2620  		if (request == req) {
2621  			req_on_hw_ring = 1;
2622  			goto found;
2623  		}
2624  	}
2625  
2626  	list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
2627  				 list) {
2628  		if (request == req)
2629  			goto found;
2630  	}
2631  
2632  	goto not_found;
2633  
2634  found:
2635  	link_trb = priv_req->trb;
2636  
2637  	/* Update ring only if removed request is on pending_req_list list */
2638  	if (req_on_hw_ring && link_trb) {
2639  		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
2640  			((priv_req->end_trb + 1) * TRB_SIZE)));
2641  		link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
2642  				    TRB_TYPE(TRB_LINK) | TRB_CHAIN);
2643  
2644  		if (priv_ep->wa1_trb == priv_req->trb)
2645  			cdns3_wa1_restore_cycle_bit(priv_ep);
2646  	}
2647  
2648  	cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
2649  
2650  not_found:
2651  	spin_unlock_irqrestore(&priv_dev->lock, flags);
2652  	return ret;
2653  }
2654  
2655  /**
2656   * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint
2657   * Should be called after acquiring spin_lock and selecting ep
2658   * @priv_ep: endpoint object to set stall on.
2659   */
2660  void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
2661  {
2662  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2663  
2664  	trace_cdns3_halt(priv_ep, 1, 0);
2665  
2666  	if (!(priv_ep->flags & EP_STALLED)) {
2667  		u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts);
2668  
2669  		if (!(ep_sts_reg & EP_STS_DBUSY))
2670  			cdns3_ep_stall_flush(priv_ep);
2671  		else
2672  			priv_ep->flags |= EP_STALL_PENDING;
2673  	}
2674  }
2675  
2676  /**
2677   * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint
2678   * Should be called after acquiring spin_lock and selecting ep
2679   * @priv_ep: endpoint object to clear stall on
2680   */
2681  int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
2682  {
2683  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2684  	struct usb_request *request;
2685  	struct cdns3_request *priv_req;
2686  	struct cdns3_trb *trb = NULL;
2687  	int ret;
2688  	int val;
2689  
2690  	trace_cdns3_halt(priv_ep, 0, 0);
2691  
2692  	request = cdns3_next_request(&priv_ep->pending_req_list);
2693  	if (request) {
2694  		priv_req = to_cdns3_request(request);
2695  		trb = priv_req->trb;
2696  		if (trb)
2697  			trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
2698  	}
2699  
2700  	writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2701  
2702  	/* wait for EPRST cleared */
2703  	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2704  					!(val & EP_CMD_EPRST), 1, 100);
2705  	if (ret)
2706  		return -EINVAL;
2707  
2708  	priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
2709  
2710  	if (request) {
2711  		if (trb)
2712  			trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
2713  
2714  		cdns3_rearm_transfer(priv_ep, 1);
2715  	}
2716  
2717  	cdns3_start_all_request(priv_dev, priv_ep);
2718  	return ret;
2719  }
2720  
2721  /**
2722   * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint
2723   * @ep: endpoint object to set/clear stall on
2724   * @value: 1 for set stall, 0 for clear stall
2725   *
2726   * Returns 0 on success, error code elsewhere
2727   */
2728  int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value)
2729  {
2730  	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2731  	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2732  	unsigned long flags;
2733  	int ret = 0;
2734  
2735  	if (!(priv_ep->flags & EP_ENABLED))
2736  		return -EPERM;
2737  
2738  	spin_lock_irqsave(&priv_dev->lock, flags);
2739  
2740  	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2741  
2742  	if (!value) {
2743  		priv_ep->flags &= ~EP_WEDGE;
2744  		ret = __cdns3_gadget_ep_clear_halt(priv_ep);
2745  	} else {
2746  		__cdns3_gadget_ep_set_halt(priv_ep);
2747  	}
2748  
2749  	spin_unlock_irqrestore(&priv_dev->lock, flags);
2750  
2751  	return ret;
2752  }
2753  
2754  extern const struct usb_ep_ops cdns3_gadget_ep0_ops;
2755  
2756  static const struct usb_ep_ops cdns3_gadget_ep_ops = {
2757  	.enable = cdns3_gadget_ep_enable,
2758  	.disable = cdns3_gadget_ep_disable,
2759  	.alloc_request = cdns3_gadget_ep_alloc_request,
2760  	.free_request = cdns3_gadget_ep_free_request,
2761  	.queue = cdns3_gadget_ep_queue,
2762  	.dequeue = cdns3_gadget_ep_dequeue,
2763  	.set_halt = cdns3_gadget_ep_set_halt,
2764  	.set_wedge = cdns3_gadget_ep_set_wedge,
2765  };
2766  
2767  /**
2768   * cdns3_gadget_get_frame - Returns number of actual ITP frame
2769   * @gadget: gadget object
2770   *
2771   * Returns number of actual ITP frame
2772   */
2773  static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
2774  {
2775  	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2776  
2777  	return readl(&priv_dev->regs->usb_itpn);
2778  }
2779  
2780  int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
2781  {
2782  	enum usb_device_speed speed;
2783  
2784  	speed = cdns3_get_speed(priv_dev);
2785  
2786  	if (speed >= USB_SPEED_SUPER)
2787  		return 0;
2788  
2789  	/* Start driving resume signaling to indicate remote wakeup. */
2790  	writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
2791  
2792  	return 0;
2793  }
2794  
2795  static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
2796  {
2797  	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2798  	unsigned long flags;
2799  	int ret = 0;
2800  
2801  	spin_lock_irqsave(&priv_dev->lock, flags);
2802  	ret = __cdns3_gadget_wakeup(priv_dev);
2803  	spin_unlock_irqrestore(&priv_dev->lock, flags);
2804  	return ret;
2805  }
2806  
2807  static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
2808  					int is_selfpowered)
2809  {
2810  	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2811  	unsigned long flags;
2812  
2813  	spin_lock_irqsave(&priv_dev->lock, flags);
2814  	priv_dev->is_selfpowered = !!is_selfpowered;
2815  	spin_unlock_irqrestore(&priv_dev->lock, flags);
2816  	return 0;
2817  }
2818  
2819  static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
2820  {
2821  	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2822  
2823  	if (is_on) {
2824  		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
2825  	} else {
2826  		writel(~0, &priv_dev->regs->ep_ists);
2827  		writel(~0, &priv_dev->regs->usb_ists);
2828  		writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2829  	}
2830  
2831  	return 0;
2832  }
2833  
2834  static void cdns3_gadget_config(struct cdns3_device *priv_dev)
2835  {
2836  	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
2837  	u32 reg;
2838  
2839  	cdns3_ep0_config(priv_dev);
2840  
2841  	/* enable interrupts for endpoint 0 (in and out) */
2842  	writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);
2843  
2844  	/*
2845  	 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
2846  	 * revision of controller.
2847  	 */
2848  	if (priv_dev->dev_ver == DEV_VER_TI_V1) {
2849  		reg = readl(&regs->dbg_link1);
2850  
2851  		reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
2852  		reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
2853  		       DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
2854  		writel(reg, &regs->dbg_link1);
2855  	}
2856  
2857  	/*
2858  	 * By default some platforms has set protected access to memory.
2859  	 * This cause problem with cache, so driver restore non-secure
2860  	 * access to memory.
2861  	 */
2862  	reg = readl(&regs->dma_axi_ctrl);
2863  	reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
2864  	       DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
2865  	writel(reg, &regs->dma_axi_ctrl);
2866  
2867  	/* enable generic interrupt*/
2868  	writel(USB_IEN_INIT, &regs->usb_ien);
2869  	writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
2870  	/*  keep Fast Access bit */
2871  	writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);
2872  
2873  	cdns3_configure_dmult(priv_dev, NULL);
2874  }
2875  
2876  /**
2877   * cdns3_gadget_udc_start - Gadget start
2878   * @gadget: gadget object
2879   * @driver: driver which operates on this gadget
2880   *
2881   * Returns 0 on success, error code elsewhere
2882   */
2883  static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
2884  				  struct usb_gadget_driver *driver)
2885  {
2886  	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2887  	unsigned long flags;
2888  	enum usb_device_speed max_speed = driver->max_speed;
2889  
2890  	spin_lock_irqsave(&priv_dev->lock, flags);
2891  	priv_dev->gadget_driver = driver;
2892  
2893  	/* limit speed if necessary */
2894  	max_speed = min(driver->max_speed, gadget->max_speed);
2895  
2896  	switch (max_speed) {
2897  	case USB_SPEED_FULL:
2898  		writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
2899  		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2900  		break;
2901  	case USB_SPEED_HIGH:
2902  		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2903  		break;
2904  	case USB_SPEED_SUPER:
2905  		break;
2906  	default:
2907  		dev_err(priv_dev->dev,
2908  			"invalid maximum_speed parameter %d\n",
2909  			max_speed);
2910  		fallthrough;
2911  	case USB_SPEED_UNKNOWN:
2912  		/* default to superspeed */
2913  		max_speed = USB_SPEED_SUPER;
2914  		break;
2915  	}
2916  
2917  	cdns3_gadget_config(priv_dev);
2918  	spin_unlock_irqrestore(&priv_dev->lock, flags);
2919  	return 0;
2920  }
2921  
2922  /**
2923   * cdns3_gadget_udc_stop - Stops gadget
2924   * @gadget: gadget object
2925   *
2926   * Returns 0
2927   */
2928  static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
2929  {
2930  	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2931  	struct cdns3_endpoint *priv_ep;
2932  	u32 bEndpointAddress;
2933  	struct usb_ep *ep;
2934  	int val;
2935  
2936  	priv_dev->gadget_driver = NULL;
2937  
2938  	priv_dev->onchip_used_size = 0;
2939  	priv_dev->out_mem_is_allocated = 0;
2940  	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2941  
2942  	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2943  		priv_ep = ep_to_cdns3_ep(ep);
2944  		bEndpointAddress = priv_ep->num | priv_ep->dir;
2945  		cdns3_select_ep(priv_dev, bEndpointAddress);
2946  		writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2947  		readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2948  					  !(val & EP_CMD_EPRST), 1, 100);
2949  
2950  		priv_ep->flags &= ~EP_CLAIMED;
2951  	}
2952  
2953  	/* disable interrupt for device */
2954  	writel(0, &priv_dev->regs->usb_ien);
2955  	writel(0, &priv_dev->regs->usb_pwr);
2956  	writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2957  
2958  	return 0;
2959  }
2960  
2961  static const struct usb_gadget_ops cdns3_gadget_ops = {
2962  	.get_frame = cdns3_gadget_get_frame,
2963  	.wakeup = cdns3_gadget_wakeup,
2964  	.set_selfpowered = cdns3_gadget_set_selfpowered,
2965  	.pullup = cdns3_gadget_pullup,
2966  	.udc_start = cdns3_gadget_udc_start,
2967  	.udc_stop = cdns3_gadget_udc_stop,
2968  	.match_ep = cdns3_gadget_match_ep,
2969  };
2970  
2971  static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
2972  {
2973  	int i;
2974  
2975  	/* ep0 OUT point to ep0 IN. */
2976  	priv_dev->eps[16] = NULL;
2977  
2978  	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
2979  		if (priv_dev->eps[i]) {
2980  			cdns3_free_trb_pool(priv_dev->eps[i]);
2981  			devm_kfree(priv_dev->dev, priv_dev->eps[i]);
2982  		}
2983  }
2984  
2985  /**
2986   * cdns3_init_eps - Initializes software endpoints of gadget
2987   * @priv_dev: extended gadget object
2988   *
2989   * Returns 0 on success, error code elsewhere
2990   */
2991  static int cdns3_init_eps(struct cdns3_device *priv_dev)
2992  {
2993  	u32 ep_enabled_reg, iso_ep_reg;
2994  	struct cdns3_endpoint *priv_ep;
2995  	int ep_dir, ep_number;
2996  	u32 ep_mask;
2997  	int ret = 0;
2998  	int i;
2999  
3000  	/* Read it from USB_CAP3 to USB_CAP5 */
3001  	ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
3002  	iso_ep_reg = readl(&priv_dev->regs->usb_cap4);
3003  
3004  	dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");
3005  
3006  	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
3007  		ep_dir = i >> 4;	/* i div 16 */
3008  		ep_number = i & 0xF;	/* i % 16 */
3009  		ep_mask = BIT(i);
3010  
3011  		if (!(ep_enabled_reg & ep_mask))
3012  			continue;
3013  
3014  		if (ep_dir && !ep_number) {
3015  			priv_dev->eps[i] = priv_dev->eps[0];
3016  			continue;
3017  		}
3018  
3019  		priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
3020  				       GFP_KERNEL);
3021  		if (!priv_ep)
3022  			goto err;
3023  
3024  		/* set parent of endpoint object */
3025  		priv_ep->cdns3_dev = priv_dev;
3026  		priv_dev->eps[i] = priv_ep;
3027  		priv_ep->num = ep_number;
3028  		priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;
3029  
3030  		if (!ep_number) {
3031  			ret = cdns3_init_ep0(priv_dev, priv_ep);
3032  			if (ret) {
3033  				dev_err(priv_dev->dev, "Failed to init ep0\n");
3034  				goto err;
3035  			}
3036  		} else {
3037  			snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
3038  				 ep_number, !!ep_dir ? "in" : "out");
3039  			priv_ep->endpoint.name = priv_ep->name;
3040  
3041  			usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
3042  						   CDNS3_EP_MAX_PACKET_LIMIT);
3043  			priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
3044  			priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
3045  			if (ep_dir)
3046  				priv_ep->endpoint.caps.dir_in = 1;
3047  			else
3048  				priv_ep->endpoint.caps.dir_out = 1;
3049  
3050  			if (iso_ep_reg & ep_mask)
3051  				priv_ep->endpoint.caps.type_iso = 1;
3052  
3053  			priv_ep->endpoint.caps.type_bulk = 1;
3054  			priv_ep->endpoint.caps.type_int = 1;
3055  
3056  			list_add_tail(&priv_ep->endpoint.ep_list,
3057  				      &priv_dev->gadget.ep_list);
3058  		}
3059  
3060  		priv_ep->flags = 0;
3061  
3062  		dev_dbg(priv_dev->dev, "Initialized  %s support: %s %s\n",
3063  			 priv_ep->name,
3064  			 priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
3065  			 priv_ep->endpoint.caps.type_iso ? "ISO" : "");
3066  
3067  		INIT_LIST_HEAD(&priv_ep->pending_req_list);
3068  		INIT_LIST_HEAD(&priv_ep->deferred_req_list);
3069  		INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list);
3070  	}
3071  
3072  	return 0;
3073  err:
3074  	cdns3_free_all_eps(priv_dev);
3075  	return -ENOMEM;
3076  }
3077  
3078  static void cdns3_gadget_release(struct device *dev)
3079  {
3080  	struct cdns3_device *priv_dev = container_of(dev,
3081  			struct cdns3_device, gadget.dev);
3082  
3083  	kfree(priv_dev);
3084  }
3085  
3086  static void cdns3_gadget_exit(struct cdns *cdns)
3087  {
3088  	struct cdns3_device *priv_dev;
3089  
3090  	priv_dev = cdns->gadget_dev;
3091  
3092  
3093  	pm_runtime_mark_last_busy(cdns->dev);
3094  	pm_runtime_put_autosuspend(cdns->dev);
3095  
3096  	usb_del_gadget(&priv_dev->gadget);
3097  	devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
3098  
3099  	cdns3_free_all_eps(priv_dev);
3100  
3101  	while (!list_empty(&priv_dev->aligned_buf_list)) {
3102  		struct cdns3_aligned_buf *buf;
3103  
3104  		buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list);
3105  		dma_free_noncoherent(priv_dev->sysdev, buf->size,
3106  				  buf->buf,
3107  				  buf->dma,
3108  				  buf->dir);
3109  
3110  		list_del(&buf->list);
3111  		kfree(buf);
3112  	}
3113  
3114  	dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
3115  			  priv_dev->setup_dma);
3116  	dma_pool_destroy(priv_dev->eps_dma_pool);
3117  
3118  	kfree(priv_dev->zlp_buf);
3119  	usb_put_gadget(&priv_dev->gadget);
3120  	cdns->gadget_dev = NULL;
3121  	cdns_drd_gadget_off(cdns);
3122  }
3123  
3124  static int cdns3_gadget_start(struct cdns *cdns)
3125  {
3126  	struct cdns3_device *priv_dev;
3127  	u32 max_speed;
3128  	int ret;
3129  
3130  	priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL);
3131  	if (!priv_dev)
3132  		return -ENOMEM;
3133  
3134  	usb_initialize_gadget(cdns->dev, &priv_dev->gadget,
3135  			cdns3_gadget_release);
3136  	cdns->gadget_dev = priv_dev;
3137  	priv_dev->sysdev = cdns->dev;
3138  	priv_dev->dev = cdns->dev;
3139  	priv_dev->regs = cdns->dev_regs;
3140  
3141  	device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size",
3142  				 &priv_dev->onchip_buffers);
3143  
3144  	if (priv_dev->onchip_buffers <=  0) {
3145  		u32 reg = readl(&priv_dev->regs->usb_cap2);
3146  
3147  		priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg);
3148  	}
3149  
3150  	if (!priv_dev->onchip_buffers)
3151  		priv_dev->onchip_buffers = 256;
3152  
3153  	max_speed = usb_get_maximum_speed(cdns->dev);
3154  
3155  	/* Check the maximum_speed parameter */
3156  	switch (max_speed) {
3157  	case USB_SPEED_FULL:
3158  	case USB_SPEED_HIGH:
3159  	case USB_SPEED_SUPER:
3160  		break;
3161  	default:
3162  		dev_err(cdns->dev, "invalid maximum_speed parameter %d\n",
3163  			max_speed);
3164  		fallthrough;
3165  	case USB_SPEED_UNKNOWN:
3166  		/* default to superspeed */
3167  		max_speed = USB_SPEED_SUPER;
3168  		break;
3169  	}
3170  
3171  	/* fill gadget fields */
3172  	priv_dev->gadget.max_speed = max_speed;
3173  	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
3174  	priv_dev->gadget.ops = &cdns3_gadget_ops;
3175  	priv_dev->gadget.name = "usb-ss-gadget";
3176  	priv_dev->gadget.quirk_avoids_skb_reserve = 1;
3177  	priv_dev->gadget.irq = cdns->dev_irq;
3178  
3179  	spin_lock_init(&priv_dev->lock);
3180  	INIT_WORK(&priv_dev->pending_status_wq,
3181  		  cdns3_pending_setup_status_handler);
3182  
3183  	INIT_WORK(&priv_dev->aligned_buf_wq,
3184  		  cdns3_free_aligned_request_buf);
3185  
3186  	/* initialize endpoint container */
3187  	INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
3188  	INIT_LIST_HEAD(&priv_dev->aligned_buf_list);
3189  	priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool",
3190  						 priv_dev->sysdev,
3191  						 TRB_RING_SIZE, 8, 0);
3192  	if (!priv_dev->eps_dma_pool) {
3193  		dev_err(priv_dev->dev, "Failed to create TRB dma pool\n");
3194  		ret = -ENOMEM;
3195  		goto err1;
3196  	}
3197  
3198  	ret = cdns3_init_eps(priv_dev);
3199  	if (ret) {
3200  		dev_err(priv_dev->dev, "Failed to create endpoints\n");
3201  		goto err1;
3202  	}
3203  
3204  	/* allocate memory for setup packet buffer */
3205  	priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
3206  						 &priv_dev->setup_dma, GFP_DMA);
3207  	if (!priv_dev->setup_buf) {
3208  		ret = -ENOMEM;
3209  		goto err2;
3210  	}
3211  
3212  	priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);
3213  
3214  	dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
3215  		readl(&priv_dev->regs->usb_cap6));
3216  	dev_dbg(priv_dev->dev, "USB Capabilities:: %08x\n",
3217  		readl(&priv_dev->regs->usb_cap1));
3218  	dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
3219  		readl(&priv_dev->regs->usb_cap2));
3220  
3221  	priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
3222  	if (priv_dev->dev_ver >= DEV_VER_V2)
3223  		priv_dev->gadget.sg_supported = 1;
3224  
3225  	priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
3226  	if (!priv_dev->zlp_buf) {
3227  		ret = -ENOMEM;
3228  		goto err3;
3229  	}
3230  
3231  	/* add USB gadget device */
3232  	ret = usb_add_gadget(&priv_dev->gadget);
3233  	if (ret < 0) {
3234  		dev_err(priv_dev->dev, "Failed to add gadget\n");
3235  		goto err4;
3236  	}
3237  
3238  	return 0;
3239  err4:
3240  	kfree(priv_dev->zlp_buf);
3241  err3:
3242  	dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
3243  			  priv_dev->setup_dma);
3244  err2:
3245  	cdns3_free_all_eps(priv_dev);
3246  err1:
3247  	dma_pool_destroy(priv_dev->eps_dma_pool);
3248  
3249  	usb_put_gadget(&priv_dev->gadget);
3250  	cdns->gadget_dev = NULL;
3251  	return ret;
3252  }
3253  
3254  static int __cdns3_gadget_init(struct cdns *cdns)
3255  {
3256  	int ret = 0;
3257  
3258  	/* Ensure 32-bit DMA Mask in case we switched back from Host mode */
3259  	ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
3260  	if (ret) {
3261  		dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
3262  		return ret;
3263  	}
3264  
3265  	cdns_drd_gadget_on(cdns);
3266  	pm_runtime_get_sync(cdns->dev);
3267  
3268  	ret = cdns3_gadget_start(cdns);
3269  	if (ret) {
3270  		pm_runtime_put_sync(cdns->dev);
3271  		return ret;
3272  	}
3273  
3274  	/*
3275  	 * Because interrupt line can be shared with other components in
3276  	 * driver it can't use IRQF_ONESHOT flag here.
3277  	 */
3278  	ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
3279  					cdns3_device_irq_handler,
3280  					cdns3_device_thread_irq_handler,
3281  					IRQF_SHARED, dev_name(cdns->dev),
3282  					cdns->gadget_dev);
3283  
3284  	if (ret)
3285  		goto err0;
3286  
3287  	return 0;
3288  err0:
3289  	cdns3_gadget_exit(cdns);
3290  	return ret;
3291  }
3292  
3293  static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
3294  __must_hold(&cdns->lock)
3295  {
3296  	struct cdns3_device *priv_dev = cdns->gadget_dev;
3297  
3298  	spin_unlock(&cdns->lock);
3299  	cdns3_disconnect_gadget(priv_dev);
3300  	spin_lock(&cdns->lock);
3301  
3302  	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
3303  	usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
3304  	cdns3_hw_reset_eps_config(priv_dev);
3305  
3306  	/* disable interrupt for device */
3307  	writel(0, &priv_dev->regs->usb_ien);
3308  
3309  	return 0;
3310  }
3311  
3312  static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
3313  {
3314  	struct cdns3_device *priv_dev = cdns->gadget_dev;
3315  
3316  	if (!priv_dev->gadget_driver)
3317  		return 0;
3318  
3319  	cdns3_gadget_config(priv_dev);
3320  	if (hibernated)
3321  		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
3322  
3323  	return 0;
3324  }
3325  
3326  /**
3327   * cdns3_gadget_init - initialize device structure
3328   *
3329   * @cdns: cdns instance
3330   *
3331   * This function initializes the gadget.
3332   */
3333  int cdns3_gadget_init(struct cdns *cdns)
3334  {
3335  	struct cdns_role_driver *rdrv;
3336  
3337  	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
3338  	if (!rdrv)
3339  		return -ENOMEM;
3340  
3341  	rdrv->start	= __cdns3_gadget_init;
3342  	rdrv->stop	= cdns3_gadget_exit;
3343  	rdrv->suspend	= cdns3_gadget_suspend;
3344  	rdrv->resume	= cdns3_gadget_resume;
3345  	rdrv->state	= CDNS_ROLE_STATE_INACTIVE;
3346  	rdrv->name	= "gadget";
3347  	cdns->roles[USB_ROLE_DEVICE] = rdrv;
3348  
3349  	return 0;
3350  }
3351