xref: /linux/drivers/usb/cdns3/cdnsp-ring.c (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence CDNSP DRD Driver.
 *
 * Copyright (C) 2020 Cadence.
 *
 * Author: Pawel Laszczak <pawell@cadence.com>
 *
 * Code based on Linux XHCI driver.
 * Origin: Copyright (C) 2008 Intel Corp
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
 *    command and endpoint rings. If the controller is the producer for the
 *    event ring, it generates an interrupt according to interrupt modulation
 *    rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer. Controller is the consumer for the
 *    command and endpoint rings; it generates events on the event ring
 *    for these.
 */
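
/*
 * Illustrative example of the rules above (values are examples only,
 * assuming two segments of TRBS_PER_SEGMENT TRBs each): the producer
 * starts with PCS = 1 and writes cycle = 1 into each TRB it enqueues.
 * When it reaches the link TRB at the end of segment 1, it follows the
 * link into segment 2; the link TRB at the end of segment 2 has the
 * toggle bit set, so following it back to segment 1 flips PCS to 0.
 * The consumer owns a TRB while trb->cycle == its CCS and flips CCS at
 * the same toggle link TRB, so producer and consumer always agree on
 * ownership.
 */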

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include "cdnsp-trace.h"
#include "cdnsp-gadget.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
				 union cdnsp_trb *trb)
{
	unsigned long segment_offset = trb - seg->trbs;

	if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
		return 0;

	return seg->dma + (segment_offset * sizeof(*trb));
}
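
/*
 * For illustration (example values only): a TRB is 16 bytes, so if
 * seg->dma == 0x1000 and trb points at seg->trbs[3], the function
 * returns 0x1000 + 3 * 16 = 0x1030. A trb that is not inside
 * seg->trbs[] yields 0, which callers use as "not in this segment".
 */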

static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
			    struct cdnsp_segment *seg,
			    union cdnsp_trb *trb)
{
	return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
{
	if (cdnsp_trb_is_link(trb)) {
		/* Unchain chained link TRBs. */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB. */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}

/*
 * Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment. This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void cdnsp_next_trb(struct cdnsp_device *pdev,
			   struct cdnsp_ring *ring,
			   struct cdnsp_segment **seg,
			   union cdnsp_trb **trb)
{
	if (cdnsp_trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 */
void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
{
	/* The event ring doesn't have link TRBs; check for the last TRB. */
	if (ring->type == TYPE_EVENT) {
		if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}

		if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;

		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link TRBs. */
	if (!cdnsp_trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (cdnsp_trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
out:
	trace_cdnsp_inc_deq(ring);
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before ringing the doorbell.
 */
static void cdnsp_inc_enq(struct cdnsp_device *pdev,
			  struct cdnsp_ring *ring,
			  bool more_trbs_coming)
{
	union cdnsp_trb *next;
	u32 chain;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

	/* If this is not an event ring, there is one less usable TRB. */
	if (!cdnsp_trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	/* Update the enqueue pointer further if that was a link TRB */
	while (cdnsp_trb_is_link(next)) {
		/*
		 * If the caller doesn't plan on enqueuing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * cdnsp_prepare_ring() just before we enqueue the TD at the
		 * top of the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		next->link.control &= cpu_to_le32(~TRB_CHAIN);
		next->link.control |= cpu_to_le32(chain);

		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (cdnsp_link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}

	trace_cdnsp_inc_enq(ring);
}
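
/*
 * Worked example for cdnsp_inc_enq() (illustrative only): while queuing
 * a multi-TRB TD, every TRB except the last has TRB_CHAIN set. If the
 * enqueue pointer then lands on a link TRB, the link inherits the chain
 * bit, its cycle bit is flipped to hand it to the hardware, and enqueue
 * moves to the first TRB of the next segment. With more_trbs_coming ==
 * false and no chain bit set, the link TRB is instead left for
 * cdnsp_prepare_ring() to give back to the hardware later.
 */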

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment.
 */
static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
			       struct cdnsp_ring *ring,
			       unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return false;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;

		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return false;
	}

	return true;
}
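
/*
 * Numeric sketch of the check above (example values only): with
 * num_trbs_free == 10 and a request needing num_trbs == 8 on a transfer
 * ring whose dequeue pointer sits 5 TRBs into its segment, 8 + 5 = 13 >
 * 10, so the ring reports "no room" and cdnsp_prepare_ring() will try
 * ring expansion even though 8 TRBs are nominally free.
 */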

/*
 * Workaround for L1: controller has issue with resuming from L1 after
 * setting doorbell for endpoint during L1 state. This function forces
 * resume signal in such case.
 */
static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
{
	if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
		cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
}

/* Ring the doorbell after placing a command on the ring. */
void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
{
	writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
}

/*
 * Ring the doorbell after placing a transfer on the ring.
 * Returns true if doorbell was set, otherwise false.
 */
static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep,
				   unsigned int stream_id)
{
	__le32 __iomem *reg_addr = &pdev->dba->ep_db;
	unsigned int ep_state = pep->ep_state;
	unsigned int db_value;

	/*
	 * Don't ring the doorbell for this endpoint if endpoint is halted or
	 * disabled.
	 */
	if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
		return false;

	/* For stream-capable endpoints the doorbell can be rung only twice. */
	if (pep->ep_state & EP_HAS_STREAMS) {
		if (pep->stream_info.drbls_count >= 2)
			return false;

		pep->stream_info.drbls_count++;
	}

	pep->ep_state &= ~EP_STOPPED;

	if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
	    !pdev->ep0_expect_in)
		db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
	else
		db_value = DB_VALUE(pep->idx, stream_id);

	trace_cdnsp_tr_drbl(pep, stream_id);

	writel(db_value, reg_addr);

	cdnsp_force_l0_go(pdev);

	/* Doorbell was set. */
	return true;
}

/*
 * Get the right ring for the given pep and stream_id.
 * If the endpoint supports streams, boundary check the USB request's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
						  struct cdnsp_ep *pep,
						  unsigned int stream_id)
{
	if (!(pep->ep_state & EP_HAS_STREAMS))
		return pep->ring;

	if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
		dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
			pep->name, stream_id);
		return NULL;
	}

	return pep->stream_info.stream_rings[stream_id];
}

static struct cdnsp_ring *
	cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
				       struct cdnsp_request *preq)
{
	return cdnsp_get_transfer_ring(pdev, preq->pep,
				       preq->request.stream_id);
}

/* Ring the doorbell for any rings with pending requests. */
void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
					  struct cdnsp_ep *pep)
{
	struct cdnsp_stream_info *stream_info;
	unsigned int stream_id;
	int ret;

	if (pep->ep_state & EP_DIS_IN_RROGRESS)
		return;

	/* A ring has pending requests if its TD list is not empty. */
	if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
		if (pep->ring && !list_empty(&pep->ring->td_list))
			cdnsp_ring_ep_doorbell(pdev, pep, 0);
		return;
	}

	stream_info = &pep->stream_info;

	for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
		struct cdnsp_td *td, *td_temp;
		struct cdnsp_ring *ep_ring;

		if (stream_info->drbls_count >= 2)
			return;

		ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
		if (!ep_ring)
			continue;

		if (!ep_ring->stream_active || ep_ring->stream_rejected)
			continue;

		list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
					 td_list) {
			if (td->drbl)
				continue;

			ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
			if (ret)
				td->drbl = 1;
		}
	}
}

/*
 * Get the hw dequeue pointer the controller stopped on, either directly from
 * the endpoint context, or, if streams are in use, from the stream context.
 * The returned hw_dequeue contains the lowest four bits with the cycle state
 * and possible stream context type.
 */
static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
			    unsigned int ep_index,
			    unsigned int stream_id)
{
	struct cdnsp_stream_ctx *st_ctx;
	struct cdnsp_ep *pep;

	pep = &pdev->eps[ep_index];

	if (pep->ep_state & EP_HAS_STREAMS) {
		st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
		return le64_to_cpu(st_ctx->stream_ring);
	}

	return le64_to_cpu(pep->out_ctx->deq);
}

/*
 * Move the controller endpoint ring dequeue pointer past cur_td.
 * Record the new state of the controller endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the
 *    controller stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD. We toggle the controller new cycle state
 *    when we pass any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
					 struct cdnsp_ep *pep,
					 unsigned int stream_id,
					 struct cdnsp_td *cur_td,
					 struct cdnsp_dequeue_state *state)
{
	bool td_last_trb_found = false;
	struct cdnsp_segment *new_seg;
	struct cdnsp_ring *ep_ring;
	union cdnsp_trb *new_deq;
	bool cycle_found = false;
	u64 hw_dequeue;

	ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
	if (!ep_ring)
		return;

	/*
	 * Dig out the cycle state saved by the controller during the
	 * stop endpoint command.
	 */
	hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;
	state->stream_id = stream_id;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;

			if (td_last_trb_found)
				break;
		}

		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && cdnsp_trb_is_link(new_deq) &&
		    cdnsp_link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out. */
		if (new_deq == pep->ring->dequeue) {
			dev_err(pdev->dev,
				"Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	trace_cdnsp_new_deq_state(state);
}
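
/*
 * Example of the three jumps above (illustrative values only): if the
 * controller stopped mid-TD with hw_dequeue == 0x2030 and cycle 1, the
 * walk starts at the ring's software dequeue, marks cycle_found once it
 * passes DMA address 0x2030, marks td_last_trb_found at
 * cur_td->last_trb, flips new_cycle_state at any toggle link TRB passed
 * after cycle_found, and finally leaves new_deq pointing at the first
 * TRB after the TD.
 */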

/*
 * flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
 */
static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
			     struct cdnsp_ring *ep_ring,
			     struct cdnsp_td *td,
			     bool flip_cycle)
{
	struct cdnsp_segment *seg = td->start_seg;
	union cdnsp_trb *trb = td->first_trb;

	while (1) {
		cdnsp_trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
	}
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment. If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment. Otherwise it
 * returns 0.
 */
static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
					     struct cdnsp_segment *start_seg,
					     union cdnsp_trb *start_trb,
					     union cdnsp_trb *end_trb,
					     dma_addr_t suspect_dma)
{
	struct cdnsp_segment *cur_seg;
	union cdnsp_trb *temp_trb;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	dma_addr_t start_dma;

	start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;

		temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);

		trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma,
					      end_trb_dma, cur_seg->dma,
					      end_seg_dma);

		if (end_trb_dma > 0) {
			/*
			 * The end TRB is in this segment, so suspect should
			 * be here
			 */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma &&
				    suspect_dma <= end_trb_dma) {
					return cur_seg;
				}
			} else {
				/*
				 * Case for one segment with a
				 * TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
				     suspect_dma <= end_seg_dma) ||
				    (suspect_dma >= cur_seg->dma &&
				     suspect_dma <= end_trb_dma)) {
					return cur_seg;
				}
			}

			return NULL;
		}

		/* Might still be somewhere in this segment */
		if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
			return cur_seg;

		cur_seg = cur_seg->next;
		start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
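
/*
 * Example of the wrapped-TD case above (illustrative values only,
 * assuming a 64-TRB segment): on a single-segment ring with seg->dma ==
 * 0x1000 and end_seg_dma == 0x13f0, a TD may start at 0x1380 and wrap
 * so that end_trb_dma == 0x1040. Then start_dma > end_trb_dma, and a
 * suspect_dma of either 0x13c0 (tail of the segment) or 0x1020 (head of
 * the segment) is still inside the TD.
 */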

static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
					 struct cdnsp_ring *ring,
					 struct cdnsp_td *td)
{
	struct cdnsp_segment *seg = td->bounce_seg;
	struct cdnsp_request *preq;
	size_t len;

	if (!seg)
		return;

	preq = td->preq;

	trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs,
				 seg->bounce_dma, 0);

	if (!preq->direction) {
		dma_unmap_single(pdev->dev, seg->bounce_dma,
				 ring->bounce_buf_len, DMA_TO_DEVICE);
		return;
	}

	dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);

	/* For IN transfers we need to copy the data from bounce to sg */
	len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
				   seg->bounce_buf, seg->bounce_len,
				   seg->bounce_offs);
	if (len != seg->bounce_len)
		dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
			 len, seg->bounce_len);

	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
			     struct cdnsp_ep *pep,
			     struct cdnsp_dequeue_state *deq_state)
{
	struct cdnsp_ring *ep_ring;
	int ret;

	if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
		cdnsp_ring_doorbell_for_active_rings(pdev, pep);
		return 0;
	}

	cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx));
	trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx);

	/*
	 * Update the ring's dequeue segment and dequeue pointer
	 * to reflect the new position.
	 */
	ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);

	if (cdnsp_trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != deq_state->new_deq_ptr) {
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;

		if (cdnsp_trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue == deq_state->new_deq_ptr)
				break;

			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
	}

	/*
	 * A timeout probably occurred while handling the Set Dequeue Pointer
	 * command. It's a critical error and the controller will be stopped.
	 */
	if (ret)
		return -ESHUTDOWN;

	/* Restart any rings with pending requests */
	cdnsp_ring_doorbell_for_active_rings(pdev, pep);

	return 0;
}

int cdnsp_remove_request(struct cdnsp_device *pdev,
			 struct cdnsp_request *preq,
			 struct cdnsp_ep *pep)
{
	struct cdnsp_dequeue_state deq_state;
	struct cdnsp_td *cur_td = NULL;
	struct cdnsp_ring *ep_ring;
	struct cdnsp_segment *seg;
	int status = -ECONNRESET;
	int ret = 0;
	u64 hw_deq;

	memset(&deq_state, 0, sizeof(deq_state));

	trace_cdnsp_remove_request(pep->out_ctx);
	trace_cdnsp_remove_request_td(preq);

	cur_td = &preq->td;
	ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);

	/*
	 * If we stopped on the TD we need to cancel, then we have to
	 * move the controller endpoint ring dequeue pointer past
	 * this TD.
	 */
	hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
	hw_deq &= ~0xf;

	seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
			      cur_td->last_trb, hw_deq);

	if (seg && (pep->ep_state & EP_ENABLED))
		cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
					     cur_td, &deq_state);
	else
		cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);

	/*
	 * The event handler won't see a completion for this TD anymore,
	 * so remove it from the endpoint ring's TD list.
	 */
	list_del_init(&cur_td->td_list);
	ep_ring->num_tds--;
	pep->stream_info.td_count--;

	/*
	 * During disconnect all endpoints will be disabled, so we don't
	 * have to worry about updating the dequeue pointer.
	 */
	if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING)
		status = -ESHUTDOWN;
	else
		ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);

	cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
	cdnsp_gadget_giveback(pep, cur_td->preq, status);

	return ret;
}

static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
{
	struct cdnsp_port *port = pdev->active_port;
	u8 old_port = 0;

	if (port && port->port_num == port_id)
		return 0;

	if (port)
		old_port = port->port_num;

	if (port_id == pdev->usb2_port.port_num) {
		port = &pdev->usb2_port;
	} else if (port_id == pdev->usb3_port.port_num) {
		port = &pdev->usb3_port;
	} else {
		dev_err(pdev->dev, "Port event with invalid port ID %d\n",
			port_id);
		return -EINVAL;
	}

	if (port_id != old_port) {
		cdnsp_disable_slot(pdev);
		pdev->active_port = port;
		cdnsp_enable_slot(pdev);
	}

	if (port_id == pdev->usb2_port.port_num)
		cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
	else
		writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
		       &pdev->usb3_port.regs->portpmsc);

	return 0;
}

static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
				     union cdnsp_trb *event)
{
	struct cdnsp_port_regs __iomem *port_regs;
	u32 portsc, cmd_regs;
	bool port2 = false;
	u32 link_state;
	u32 port_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		dev_err(pdev->dev, "ERR: incorrect PSC event\n");

	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));

	if (cdnsp_update_port_id(pdev, port_id))
		goto cleanup;

	port_regs = pdev->active_port->regs;

	if (port_id == pdev->usb2_port.port_num)
		port2 = true;

new_event:
	portsc = readl(&port_regs->portsc);
	writel(cdnsp_port_state_to_neutral(portsc) |
	       (portsc & PORT_CHANGE_BITS), &port_regs->portsc);

	trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc);

	pdev->gadget.speed = cdnsp_port_speed(portsc);
	link_state = portsc & PORT_PLS_MASK;

	/* Port Link State change detected. */
	if ((portsc & PORT_PLC)) {
		if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
		    link_state == XDEV_RESUME) {
			cmd_regs = readl(&pdev->op_regs->command);
			if (!(cmd_regs & CMD_R_S))
				goto cleanup;

			if (DEV_SUPERSPEED_ANY(portsc)) {
				cdnsp_set_link_state(pdev, &port_regs->portsc,
						     XDEV_U0);

				cdnsp_resume_gadget(pdev);
			}
		}

		if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
		    link_state == XDEV_U0) {
			pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;

			cdnsp_force_header_wakeup(pdev, 1);
			cdnsp_ring_cmd_db(pdev);
			cdnsp_wait_for_cmd_compl(pdev);
		}

		if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
		    !DEV_SUPERSPEED_ANY(portsc))
			cdnsp_resume_gadget(pdev);

		if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3)
			cdnsp_suspend_gadget(pdev);

		pdev->link_state = link_state;
	}

	if (portsc & PORT_CSC) {
		/* Detach device. */
		if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
			cdnsp_disconnect_gadget(pdev);

		/* Attach device. */
		if (portsc & PORT_CONNECT) {
			if (!port2)
				cdnsp_irq_reset(pdev);

			usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
		}
	}

	/* Port reset. */
	if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
		cdnsp_irq_reset(pdev);
		pdev->u1_allowed = 0;
		pdev->u2_allowed = 0;
		pdev->may_wakeup = 0;
	}

	/* The over-current check is assumed to use the OCC change bit. */
	if (portsc & PORT_OCC)
		dev_err(pdev->dev, "Port Over Current detected\n");

	if (portsc & PORT_CEC)
		dev_err(pdev->dev, "Port Configure Error detected\n");

	if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
		goto new_event;

cleanup:
	cdnsp_inc_deq(pdev, pdev->event_ring);
}

static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
			     struct cdnsp_td *td,
			     struct cdnsp_ring *ep_ring,
			     int *status)
{
	struct cdnsp_request *preq = td->preq;

	/* if a bounce buffer was used to align this td then unmap it */
	cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);

	/*
	 * If the controller said we transferred more data than the buffer
	 * length, play it safe and say we didn't transfer anything.
	 */
	if (preq->request.actual > preq->request.length) {
		preq->request.actual = 0;
		*status = 0;
	}

	list_del_init(&td->td_list);
	ep_ring->num_tds--;
	preq->pep->stream_info.td_count--;

	cdnsp_gadget_giveback(preq->pep, preq, *status);
}

static void cdnsp_finish_td(struct cdnsp_device *pdev,
			    struct cdnsp_td *td,
			    struct cdnsp_transfer_event *event,
			    struct cdnsp_ep *ep,
			    int *status)
{
	struct cdnsp_ring *ep_ring;
	u32 trb_comp_code;

	ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
	    trb_comp_code == COMP_STOPPED ||
	    trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
		/*
		 * The Endpoint Stop Command completion will take care of any
		 * stopped TDs. A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		return;
	}

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		cdnsp_inc_deq(pdev, ep_ring);

	cdnsp_inc_deq(pdev, ep_ring);

	cdnsp_td_cleanup(pdev, td, ep_ring, status);
}

/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
				 struct cdnsp_ring *ring,
				 union cdnsp_trb *stop_trb)
{
	struct cdnsp_segment *seg = ring->deq_seg;
	union cdnsp_trb *trb = ring->dequeue;
	u32 sum;

	for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
		if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}
	return sum;
}
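
/*
 * For illustration (example values only): with three queued normal TRBs
 * of 1024, 1024 and 512 bytes and stop_trb pointing at the third one,
 * the function returns 1024 + 1024 = 2048; link and no-op TRBs between
 * dequeue and stop_trb contribute nothing.
 */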

static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
				    struct cdnsp_ep *pep,
				    unsigned int stream_id,
				    int start_cycle,
				    struct cdnsp_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();

	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	if ((pep->ep_state & EP_HAS_STREAMS) &&
	    !pep->stream_info.first_prime_det) {
		trace_cdnsp_wait_for_prime(pep, stream_id);
		return 0;
	}

	return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
}

/*
 * Process control tds, update USB request status and actual_length.
 */
static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
				  struct cdnsp_td *td,
				  union cdnsp_trb *event_trb,
				  struct cdnsp_transfer_event *event,
				  struct cdnsp_ep *pep,
				  int *status)
{
	struct cdnsp_ring *ep_ring;
	u32 remaining;
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

	/*
	 * If on the data stage, update the actual_length of the USB request
	 * and flag it as set, so it won't be overwritten in the event for the
	 * last TRB.
	 */
	if (trb_type == TRB_DATA) {
		td->request_length_set = true;
		td->preq->request.actual = td->preq->request.length - remaining;
	}

	/* at status stage */
	if (!td->request_length_set)
		td->preq->request.actual = td->preq->request.length;

	if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
	    pdev->three_stage_setup) {
		td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
				td_list);
		pdev->ep0_stage = CDNSP_STATUS_STAGE;

		cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
					 &td->last_trb->generic);
		return;
	}

	*status = 0;

	cdnsp_finish_td(pdev, td, event, pep, status);
}

/*
 * Process isochronous tds, update usb request status and actual_length.
 */
static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
				  struct cdnsp_td *td,
				  union cdnsp_trb *ep_trb,
				  struct cdnsp_transfer_event *event,
				  struct cdnsp_ep *pep,
				  int status)
{
	struct cdnsp_request *preq = td->preq;
	u32 remaining, requested, ep_trb_len;
	bool sum_trbs_for_length = false;
	struct cdnsp_ring *ep_ring;
	u32 trb_comp_code;
	u32 td_length;

	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));

	requested = preq->request.length;

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		preq->request.status = 0;
		break;
	case COMP_SHORT_PACKET:
		preq->request.status = 0;
		sum_trbs_for_length = true;
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
	case COMP_BABBLE_DETECTED_ERROR:
		preq->request.status = -EOVERFLOW;
		break;
	case COMP_STOPPED:
		sum_trbs_for_length = true;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		/* field normally containing residue now contains transferred */
		preq->request.status = 0;
		requested = remaining;
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		requested = 0;
		remaining = 0;
		break;
	default:
		sum_trbs_for_length = true;
		preq->request.status = -1;
		break;
	}

	if (sum_trbs_for_length) {
		td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
		td_length += ep_trb_len - remaining;
	} else {
		td_length = requested;
	}

	td->preq->request.actual += td_length;

	cdnsp_finish_td(pdev, td, event, pep, &status);
}

static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
			       struct cdnsp_td *td,
			       struct cdnsp_transfer_event *event,
			       struct cdnsp_ep *pep,
			       int status)
{
	struct cdnsp_ring *ep_ring;

	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
	td->preq->request.status = -EXDEV;
	td->preq->request.actual = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		cdnsp_inc_deq(pdev, ep_ring);

	cdnsp_inc_deq(pdev, ep_ring);

	cdnsp_td_cleanup(pdev, td, ep_ring, &status);
}

/*
 * Process bulk and interrupt tds, update usb request status and actual_length.
 */
static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
				       struct cdnsp_td *td,
				       union cdnsp_trb *ep_trb,
				       struct cdnsp_transfer_event *event,
				       struct cdnsp_ep *ep,
				       int *status)
{
	u32 remaining, requested, ep_trb_len;
	struct cdnsp_ring *ep_ring;
	u32 trb_comp_code;

	ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	requested = td->preq->request.length;

	switch (trb_comp_code) {
	case COMP_SUCCESS:
	case COMP_SHORT_PACKET:
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		td->preq->request.actual = remaining;
		goto finish_td;
	case COMP_STOPPED_LENGTH_INVALID:
		/* Stopped on ep trb with invalid length, exclude it. */
		ep_trb_len = 0;
		remaining = 0;
		break;
	}

	if (ep_trb == td->last_trb)
		ep_trb_len = requested - remaining;
	else
		ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
						   ep_trb_len - remaining;
	td->preq->request.actual = ep_trb_len;

finish_td:
	ep->stream_info.drbls_count--;

	cdnsp_finish_td(pdev, td, event, ep, status);
}

static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
				 struct cdnsp_transfer_event *event)
{
	struct cdnsp_generic_trb *generic;
	struct cdnsp_ring *ep_ring;
	struct cdnsp_ep *pep;
	int cur_stream;
	int ep_index;
	int host_sid;
	int dev_sid;

	generic = (struct cdnsp_generic_trb *)event;
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0]));
	host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2]));

	pep = &pdev->eps[ep_index];

	if (!(pep->ep_state & EP_HAS_STREAMS))
		return;

	if (host_sid == STREAM_PRIME_ACK) {
		pep->stream_info.first_prime_det = 1;
		for (cur_stream = 1; cur_stream < pep->stream_info.num_streams;
		    cur_stream++) {
			ep_ring = pep->stream_info.stream_rings[cur_stream];
			ep_ring->stream_active = 1;
			ep_ring->stream_rejected = 0;
		}
	}

	if (host_sid == STREAM_REJECTED) {
		struct cdnsp_td *td, *td_temp;

		pep->stream_info.drbls_count--;
		ep_ring = pep->stream_info.stream_rings[dev_sid];
		ep_ring->stream_active = 0;
		ep_ring->stream_rejected = 1;

		list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
					 td_list) {
			td->drbl = 0;
		}
	}

	cdnsp_ring_doorbell_for_active_rings(pdev, pep);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted TRB DMA address or endpoint is disabled.
 */
static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
				 struct cdnsp_transfer_event *event)
{
	const struct usb_endpoint_descriptor *desc;
	bool handling_skipped_tds = false;
	struct cdnsp_segment *ep_seg;
	struct cdnsp_ring *ep_ring;
	int status = -EINPROGRESS;
	union cdnsp_trb *ep_trb;
	dma_addr_t ep_trb_dma;
	struct cdnsp_ep *pep;
	struct cdnsp_td *td;
	u32 trb_comp_code;
	int invalidate;
	int ep_index;

	invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	ep_trb_dma = le64_to_cpu(event->buffer);

	pep = &pdev->eps[ep_index];
	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));

	/*
	 * If the device is disconnected, all requests will be dequeued by the
	 * upper layers as part of the disconnect sequence. Don't handle such
	 * events, to avoid racing with that teardown.
	 */
	if (invalidate || !pdev->gadget.connected)
		goto cleanup;

	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
		trace_cdnsp_ep_disabled(pep->out_ctx);
		goto err_out;
	}

	/* Some transfer events don't always point to a TRB. */
	if (!ep_ring) {
		switch (trb_comp_code) {
		case COMP_INVALID_STREAM_TYPE_ERROR:
		case COMP_INVALID_STREAM_ID_ERROR:
		case COMP_RING_UNDERRUN:
		case COMP_RING_OVERRUN:
			goto cleanup;
		default:
			dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
				pep->name);
			goto err_out;
		}
	}

	/* Look for some error cases that need special treatment. */
	switch (trb_comp_code) {
	case COMP_BABBLE_DETECTED_ERROR:
		status = -EOVERFLOW;
		break;
	case COMP_RING_UNDERRUN:
	case COMP_RING_OVERRUN:
		/*
		 * When the Isoch ring is empty, the controller will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		goto cleanup;
	case COMP_MISSED_SERVICE_ERROR:
		/*
		 * When a missed service error is encountered, one or more
		 * isoc TDs may have been missed by the controller.
		 * Set the skip flag of the ep_ring; complete the missed TDs
		 * as short transfers when processing the ep_ring next time.
		 */
		pep->skip = true;
		break;
	}

	do {
		/*
		 * This TRB should be in the TD at the head of this ring's TD
		 * list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * Don't print warnings if it's due to a stopped
			 * endpoint generating an extra completion event, or
			 * an event for the last TRB of a short TD we already
			 * got a short event for.
			 * The short TD is already removed from the TD list.
			 */
			if (!(trb_comp_code == COMP_STOPPED ||
			      trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
			      ep_ring->last_td_was_short))
				trace_cdnsp_trb_without_td(ep_ring,
					(struct cdnsp_generic_trb *)event);

			if (pep->skip) {
				pep->skip = false;
				trace_cdnsp_ep_list_empty_with_skip(pep, 0);
			}

			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
				td_list);

		/* Is this a TRB in the currently executing TD? */
		ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
					 ep_ring->dequeue, td->last_trb,
					 ep_trb_dma);

		desc = td->preq->pep->endpoint.desc;

		if (ep_seg) {
			ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
					       / sizeof(*ep_trb)];

			trace_cdnsp_handle_transfer(ep_ring,
					(struct cdnsp_generic_trb *)ep_trb);

			if (pep->skip && usb_endpoint_xfer_isoc(desc) &&
			    td->last_trb != ep_trb)
				return -EAGAIN;
		}

		/*
		 * Skip the Force Stopped Event. The event_trb(ep_trb_dma)
		 * of FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the previous TRB of the current TD. The previous
		 * TRB may be a Link TRB or the last TRB of the previous TD.
		 * The command completion handler will take care of the rest.
		 */
		if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
				trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
			pep->skip = false;
			goto cleanup;
		}

		if (!ep_seg) {
			if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
				/* Something is busted, give up! */
				dev_err(pdev->dev,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				return -EINVAL;
			}

			cdnsp_skip_isoc_td(pdev, td, event, pep, status);
			goto cleanup;
		}

		if (trb_comp_code == COMP_SHORT_PACKET)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (pep->skip) {
			pep->skip = false;
			cdnsp_skip_isoc_td(pdev, td, event, pep, status);
			goto cleanup;
		}

		if (cdnsp_trb_is_noop(ep_trb))
			goto cleanup;

		if (usb_endpoint_xfer_control(desc))
			cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
					      &status);
		else if (usb_endpoint_xfer_isoc(desc))
			cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
					      status);
		else
			cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
						   &status);
cleanup:
		handling_skipped_tds = pep->skip;

		/*
		 * Do not update event ring dequeue pointer if we're in a loop
		 * processing missed TDs.
		 */
		if (!handling_skipped_tds)
			cdnsp_inc_deq(pdev, pdev->event_ring);

	/*
	 * If ep->skip is set, it means there are missed TDs on the
	 * endpoint ring that need to be taken care of.
	 * Process them as short transfers until we reach the TD pointed
	 * to by the event.
	 */
	} while (handling_skipped_tds);
	return 0;

err_out:
	dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
		(unsigned long long)
		cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
				      pdev->event_ring->dequeue),
		 lower_32_bits(le64_to_cpu(event->buffer)),
		 upper_32_bits(le64_to_cpu(event->buffer)),
		 le32_to_cpu(event->transfer_len),
		 le32_to_cpu(event->flags));
	return -EINVAL;
}

/*
 * This function handles all events on the event ring.
 * Returns true for "possibly more events to process" (caller should call
 * again), otherwise false if done.
 */
static bool cdnsp_handle_event(struct cdnsp_device *pdev)
{
	unsigned int comp_code;
	union cdnsp_trb *event;
	bool update_ptrs = true;
	u32 cycle_bit;
	int ret = 0;
	u32 flags;

	event = pdev->event_ring->dequeue;
	flags = le32_to_cpu(event->event_cmd.flags);
	cycle_bit = (flags & TRB_CYCLE);

	/* Does the controller or driver own the TRB? */
	if (cycle_bit != pdev->event_ring->cycle_state)
		return false;

	trace_cdnsp_handle_event(pdev->event_ring, &event->generic);

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * reads of the event's flags/data below.
	 */
	rmb();

	switch (flags & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_COMPLETION):
		/*
		 * Command can't be handled in interrupt context so just
		 * increment command ring dequeue pointer.
		 */
		cdnsp_inc_deq(pdev, pdev->cmd_ring);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		cdnsp_handle_port_status(pdev, event);
		update_ptrs = false;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = cdnsp_handle_tx_event(pdev, &event->trans_event);
		if (ret >= 0)
			update_ptrs = false;
		break;
	case TRB_TYPE(TRB_SETUP):
		pdev->ep0_stage = CDNSP_SETUP_STAGE;
		pdev->setup_id = TRB_SETUPID_TO_TYPE(flags);
		pdev->setup_speed = TRB_SETUP_SPEEDID(flags);
		pdev->setup = *((struct usb_ctrlrequest *)
				&event->trans_event.buffer);

		cdnsp_setup_analyze(pdev);
		break;
	case TRB_TYPE(TRB_ENDPOINT_NRDY):
		cdnsp_handle_tx_nrdy(pdev, &event->trans_event);
		break;
	case TRB_TYPE(TRB_HC_EVENT): {
		comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));

		switch (comp_code) {
		case COMP_EVENT_RING_FULL_ERROR:
			dev_err(pdev->dev, "Event Ring Full\n");
			break;
		default:
			dev_err(pdev->dev, "Controller error code 0x%02x\n",
				comp_code);
		}

		break;
	}
	case TRB_TYPE(TRB_MFINDEX_WRAP):
	case TRB_TYPE(TRB_DRB_OVERFLOW):
		break;
	default:
		dev_warn(pdev->dev, "ERROR unknown event type %ld\n",
			 TRB_FIELD_TO_TYPE(flags));
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer. */
		cdnsp_inc_deq(pdev, pdev->event_ring);

	/*
	 * Caller will call us again to check if there are more items
	 * on the event ring.
	 */
	return true;
}

irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
{
	struct cdnsp_device *pdev = (struct cdnsp_device *)data;
	union cdnsp_trb *event_ring_deq;
	unsigned long flags;
	int counter = 0;

	local_bh_disable();
	spin_lock_irqsave(&pdev->lock, flags);

	if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
		/*
		 * While the driver is being removed or stopped, there may
		 * still be a deferred, not yet handled interrupt, which
		 * should not be treated as an error. The driver should
		 * simply ignore it.
		 */
		if (pdev->gadget_driver)
			cdnsp_died(pdev);

		spin_unlock_irqrestore(&pdev->lock, flags);
		local_bh_enable();
		return IRQ_HANDLED;
	}

	event_ring_deq = pdev->event_ring->dequeue;

	while (cdnsp_handle_event(pdev)) {
		if (++counter >= TRBS_PER_EV_DEQ_UPDATE) {
			cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0);
			event_ring_deq = pdev->event_ring->dequeue;
			counter = 0;
		}
	}

	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);

	spin_unlock_irqrestore(&pdev->lock, flags);
	local_bh_enable();

	return IRQ_HANDLED;
}

irqreturn_t cdnsp_irq_handler(int irq, void *priv)
{
	struct cdnsp_device *pdev = (struct cdnsp_device *)priv;
	u32 irq_pending;
	u32 status;

	status = readl(&pdev->op_regs->status);

	if (status == ~(u32)0) {
		cdnsp_died(pdev);
		return IRQ_HANDLED;
	}

	if (!(status & STS_EINT))
		return IRQ_NONE;

	writel(status | STS_EINT, &pdev->op_regs->status);
	irq_pending = readl(&pdev->ir_set->irq_pending);
	irq_pending |= IMAN_IP;
	writel(irq_pending, &pdev->ir_set->irq_pending);

	if (status & STS_FATAL) {
		cdnsp_died(pdev);
		return IRQ_HANDLED;
	}

	return IRQ_WAKE_THREAD;
}

/*
 * Generic function for queuing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before setting doorbell?
 */
static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring,
			    bool more_trbs_coming, u32 field1, u32 field2,
			    u32 field3, u32 field4)
{
	struct cdnsp_generic_trb *trb;

	trb = &ring->enqueue->generic;

	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	trace_cdnsp_queue_trb(ring, trb);
	cdnsp_inc_enq(pdev, ring, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to
 * queue num_trbs.
 */
static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
			      struct cdnsp_ring *ep_ring,
			      u32 ep_state, unsigned int num_trbs,
			      gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to controller schedule. */
	switch (ep_state) {
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
	case EP_STATE_HALTED:
		break;
	default:
		dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
		return -EINVAL;
	}

	while (1) {
		if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
			break;

		trace_cdnsp_no_room_on_ring("try ring expansion");

		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
					 mem_flags)) {
			dev_err(pdev->dev, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	while (cdnsp_trb_is_link(ep_ring->enqueue)) {
		ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
		/* The cycle bit must be set as the last operation. */
		wmb();
		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
			ep_ring->cycle_state ^= 1;
		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
	}
	return 0;
}

static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
				  struct cdnsp_request *preq,
				  unsigned int num_trbs)
{
	struct cdnsp_ring *ep_ring;
	int ret;

	ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
					  preq->request.stream_id);
	if (!ep_ring)
		return -EINVAL;

	ret = cdnsp_prepare_ring(pdev, ep_ring,
				 GET_EP_CTX_STATE(preq->pep->out_ctx),
				 num_trbs, GFP_ATOMIC);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&preq->td.td_list);
	preq->td.preq = preq;

	/* Add this TD to the tail of the endpoint ring's TD list. */
	list_add_tail(&preq->td.td_list, &ep_ring->td_list);
	ep_ring->num_tds++;
	preq->pep->stream_info.td_count++;

	preq->td.start_seg = ep_ring->enq_seg;
	preq->td.first_trb = ep_ring->enqueue;

	return 0;
}

static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
				TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}
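
/*
 * Example of the math above (illustrative values only, assuming
 * TRB_MAX_BUFF_SIZE == 64 KiB): a 10000-byte buffer whose DMA address
 * is 0xF000 bytes into a 64 KiB region gives
 * DIV_ROUND_UP(10000 + 0xF000, 65536) = 2 TRBs, because the transfer
 * crosses one 64 KiB boundary; a zero-length request still needs 1 TRB.
 */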

static unsigned int count_trbs_needed(struct cdnsp_request *preq)
{
	return cdnsp_count_trbs(preq->request.dma, preq->request.length);
}

static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
{
	unsigned int i, len, full_len, num_trbs = 0;
	struct scatterlist *sg;

	full_len = preq->request.length;

	for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
		len = min(len, full_len);
		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}

static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
{
	if (running_total != preq->request.length)
		dev_err(preq->pep->pdev->dev,
			"%s - Miscalculated tx length, "
			"queued %#x, asked for %#x (%d)\n",
			preq->pep->name, running_total,
			preq->request.length, preq->request.actual);
}
1748 
1749 /*
1750  * TD size is the number of max packet sized packets remaining in the TD
1751  * (*not* including this TRB).
1752  *
1753  * Total TD packet count = total_packet_count =
1754  *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
1755  *
1756  * Packets transferred up to and including this TRB = packets_transferred =
1757  *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
1758  *
1759  * TD size = total_packet_count - packets_transferred
1760  *
1761  * It must fit in bits 21:17, so it can't be bigger than 31.
1762  * This is taken care of in the TRB_TD_SIZE() macro
1763  *
1764  * The last TRB in a TD must have the TD size set to zero.
1765  */
1766 static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
1767 			      int transferred,
1768 			      int trb_buff_len,
1769 			      unsigned int td_total_len,
1770 			      struct cdnsp_request *preq,
1771 			      bool more_trbs_coming,
1772 			      bool zlp)
1773 {
1774 	u32 maxp, total_packet_count;
1775 
1776 	/* Before ZLP driver needs set TD_SIZE = 1. */
1777 	if (zlp)
1778 		return 1;
1779 
1780 	/* One TRB with a zero-length data packet. */
1781 	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
1782 	    trb_buff_len == td_total_len)
1783 		return 0;
1784 
1785 	maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
1786 	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
1787 
1788 	/* Queuing functions don't count the current TRB into transferred. */
1789 	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
1790 }
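
/*
 * Worked example (illustrative): with maxp = 512 and td_total_len =
 * 3000, total_packet_count = DIV_ROUND_UP(3000, 512) = 6. For a middle
 * TRB with transferred = 0 and trb_buff_len = 1024, the packets sent up
 * to and including this TRB are (0 + 1024) / 512 = 2, so the TD size
 * written into the TRB is 6 - 2 = 4.
 */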
1791 
1792 static int cdnsp_align_td(struct cdnsp_device *pdev,
1793 			  struct cdnsp_request *preq, u32 enqd_len,
1794 			  u32 *trb_buff_len, struct cdnsp_segment *seg)
1795 {
1796 	struct device *dev = pdev->dev;
1797 	unsigned int unalign;
1798 	unsigned int max_pkt;
1799 	u32 new_buff_len;
1800 
1801 	max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
1802 	unalign = (enqd_len + *trb_buff_len) % max_pkt;
1803 
1804 	/* We got lucky, last normal TRB data on segment is packet aligned. */
1805 	if (unalign == 0)
1806 		return 0;
1807 
1808 	/* Can the last normal TRB be made packet-aligned by splitting it? */
1809 	if (*trb_buff_len > unalign) {
1810 		*trb_buff_len -= unalign;
1811 		trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
1812 						  enqd_len, 0, unalign);
1813 		return 0;
1814 	}
1815 
1816 	/*
1817 	 * We want enqd_len + trb_buff_len to sum up to a number which is
1818 	 * divisible by the endpoint's wMaxPacketSize. IOW:
1819 	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
1820 	 */
1821 	new_buff_len = max_pkt - (enqd_len % max_pkt);
1822 
1823 	if (new_buff_len > (preq->request.length - enqd_len))
1824 		new_buff_len = (preq->request.length - enqd_len);
1825 
1826 	/* Create a bounce buffer of at most max_pkt bytes for the last TRB. */
1827 	if (preq->direction) {
1828 		sg_pcopy_to_buffer(preq->request.sg,
1829 				   preq->request.num_mapped_sgs,
1830 				   seg->bounce_buf, new_buff_len, enqd_len);
1831 		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
1832 						 max_pkt, DMA_TO_DEVICE);
1833 	} else {
1834 		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
1835 						 max_pkt, DMA_FROM_DEVICE);
1836 	}
1837 
1838 	if (dma_mapping_error(dev, seg->bounce_dma)) {
1839 		/* Try without aligning. */
1840 		dev_warn(pdev->dev,
1841 			 "Failed mapping bounce buffer, not aligning\n");
1842 		return 0;
1843 	}
1844 
1845 	*trb_buff_len = new_buff_len;
1846 	seg->bounce_len = new_buff_len;
1847 	seg->bounce_offs = enqd_len;
1848 
1849 	trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
1850 			       unalign);
1851 
1852 	/*
1853 	 * The bounce buffer was successfully aligned and seg->bounce_dma will
1854 	 * be used in the transfer TRB as the new transfer buffer address.
1855 	 */
1856 	return 1;
1857 }
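
/*
 * Worked example (illustrative): with max_pkt = 512, enqd_len = 700 and
 * *trb_buff_len = 900, unalign = 1600 % 512 = 64; the TRB is shortened
 * to 836 bytes so that 700 + 836 = 1536 is packet aligned. With
 * *trb_buff_len = 50 instead, the TRB cannot be split, so the next
 * 512 - (700 % 512) = 324 bytes (capped to what remains of the request)
 * are staged through the segment's bounce buffer.
 */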
1858 
1859 int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
1860 {
1861 	unsigned int enqd_len, block_len, trb_buff_len, full_len;
1862 	unsigned int start_cycle, num_sgs = 0;
1863 	struct cdnsp_generic_trb *start_trb;
1864 	u32 field, length_field, remainder;
1865 	struct scatterlist *sg = NULL;
1866 	bool more_trbs_coming = true;
1867 	bool need_zero_pkt = false;
1868 	bool zero_len_trb = false;
1869 	struct cdnsp_ring *ring;
1870 	bool first_trb = true;
1871 	unsigned int num_trbs;
1872 	struct cdnsp_ep *pep;
1873 	u64 addr, send_addr;
1874 	int sent_len, ret;
1875 
1876 	ring = cdnsp_request_to_transfer_ring(pdev, preq);
1877 	if (!ring)
1878 		return -EINVAL;
1879 
1880 	full_len = preq->request.length;
1881 
1882 	if (preq->request.num_sgs) {
1883 		num_sgs = preq->request.num_sgs;
1884 		sg = preq->request.sg;
1885 		addr = (u64)sg_dma_address(sg);
1886 		block_len = sg_dma_len(sg);
1887 		num_trbs = count_sg_trbs_needed(preq);
1888 	} else {
1889 		num_trbs = count_trbs_needed(preq);
1890 		addr = (u64)preq->request.dma;
1891 		block_len = full_len;
1892 	}
1893 
1894 	pep = preq->pep;
1895 
1896 	/* Deal with request.zero - need one more TRB for the trailing ZLP. */
1897 	if (preq->request.zero && preq->request.length &&
1898 	    IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
1899 		need_zero_pkt = true;
1900 		num_trbs++;
1901 	}
1902 
1903 	ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
1904 	if (ret)
1905 		return ret;
1906 
1907 	/*
1908 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
1909 	 * until we've finished creating all the other TRBs. The ring's cycle
1910 	 * state may change as we enqueue the other TRBs, so save it too.
1911 	 */
1912 	start_trb = &ring->enqueue->generic;
1913 	start_cycle = ring->cycle_state;
1914 	send_addr = addr;
1915 
1916 	/* Queue the TRBs, even if they are zero-length */
1917 	for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
1918 	     enqd_len += trb_buff_len) {
1919 		field = TRB_TYPE(TRB_NORMAL);
1920 
1921 		/* TRB buffer should not cross 64KB boundaries */
1922 		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
1923 		trb_buff_len = min(trb_buff_len, block_len);
1924 		if (enqd_len + trb_buff_len > full_len)
1925 			trb_buff_len = full_len - enqd_len;
1926 
1927 		/* Don't change the cycle bit of the first TRB until later */
1928 		if (first_trb) {
1929 			first_trb = false;
1930 			if (start_cycle == 0)
1931 				field |= TRB_CYCLE;
1932 		} else {
1933 			field |= ring->cycle_state;
1934 		}
1935 
1936 		/*
1937 		 * Chain all the TRBs together; clear the chain bit in the last
1938 		 * TRB to indicate it's the last TRB in the chain.
1939 		 */
1940 		if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
1941 			field |= TRB_CHAIN;
1942 			if (cdnsp_trb_is_link(ring->enqueue + 1)) {
1943 				if (cdnsp_align_td(pdev, preq, enqd_len,
1944 						   &trb_buff_len,
1945 						   ring->enq_seg)) {
1946 					send_addr = ring->enq_seg->bounce_dma;
1947 					/* Assuming TD won't span 2 segs */
1948 					preq->td.bounce_seg = ring->enq_seg;
1949 				}
1950 			}
1951 		}
1952 
1953 		if (enqd_len + trb_buff_len >= full_len) {
1954 			if (need_zero_pkt && !zero_len_trb) {
1955 				zero_len_trb = true;
1956 			} else {
1957 				zero_len_trb = false;
1958 				field &= ~TRB_CHAIN;
1959 				field |= TRB_IOC;
1960 				more_trbs_coming = false;
1961 				need_zero_pkt = false;
1962 				preq->td.last_trb = ring->enqueue;
1963 			}
1964 		}
1965 
1966 		/* Only set interrupt on short packet for OUT endpoints. */
1967 		if (!preq->direction)
1968 			field |= TRB_ISP;
1969 
1970 		/* Set the TRB length, TD size, and interrupter fields. */
1971 		remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
1972 					       full_len, preq,
1973 					       more_trbs_coming,
1974 					       zero_len_trb);
1975 
1976 		length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
1977 			TRB_INTR_TARGET(0);
1978 
1979 		cdnsp_queue_trb(pdev, ring, more_trbs_coming,
1980 				lower_32_bits(send_addr),
1981 				upper_32_bits(send_addr),
1982 				length_field,
1983 				field);
1984 
1985 		addr += trb_buff_len;
1986 		sent_len = trb_buff_len;
1987 		while (sg && sent_len >= block_len) {
1988 			/* New sg entry */
1989 			--num_sgs;
1990 			sent_len -= block_len;
1991 			if (num_sgs != 0) {
1992 				sg = sg_next(sg);
1993 				block_len = sg_dma_len(sg);
1994 				addr = (u64)sg_dma_address(sg);
1995 				addr += sent_len;
1996 			}
1997 		}
1998 		block_len -= sent_len;
1999 		send_addr = addr;
2000 	}
2001 
2002 	cdnsp_check_trb_math(preq, enqd_len);
2003 	ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
2004 				       start_cycle, start_trb);
2005 
2006 	if (ret)
2007 		preq->td.drbl = 1;
2008 
2009 	return 0;
2010 }
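
/*
 * Illustrative layout (not from the original source): a 1536-byte IN
 * request with request.zero set on a 512-byte maxp endpoint is queued
 * as a 1536-byte normal TRB with TRB_CHAIN and TD_SIZE = 1, followed by
 * a zero-length TRB with TRB_IOC that terminates the transfer with a
 * ZLP.
 */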
2011 
2012 int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
2013 {
2014 	u32 field, length_field, zlp = 0;
2015 	struct cdnsp_ep *pep = preq->pep;
2016 	struct cdnsp_ring *ep_ring;
2017 	int num_trbs;
2018 	u32 maxp;
2019 	int ret;
2020 
2021 	ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
2022 	if (!ep_ring)
2023 		return -EINVAL;
2024 
2025 	/* 1 TRB for the data stage (if any), 1 for the status stage. */
2026 	num_trbs = (pdev->three_stage_setup) ? 2 : 1;
2027 
2028 	maxp = usb_endpoint_maxp(pep->endpoint.desc);
2029 
2030 	if (preq->request.zero && preq->request.length &&
2031 	    (preq->request.length % maxp == 0)) {
2032 		num_trbs++;
2033 		zlp = 1;
2034 	}
2035 
2036 	ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
2037 	if (ret)
2038 		return ret;
2039 
2040 	/* If there's data, queue data TRBs */
2041 	if (preq->request.length > 0) {
2042 		field = TRB_TYPE(TRB_DATA);
2043 
2044 		if (zlp)
2045 			field |= TRB_CHAIN;
2046 		else
2047 			field |= TRB_IOC | (pdev->ep0_expect_in ? 0 : TRB_ISP);
2048 
2049 		if (pdev->ep0_expect_in)
2050 			field |= TRB_DIR_IN;
2051 
2052 		length_field = TRB_LEN(preq->request.length) |
2053 			       TRB_TD_SIZE(zlp) | TRB_INTR_TARGET(0);
2054 
2055 		cdnsp_queue_trb(pdev, ep_ring, true,
2056 				lower_32_bits(preq->request.dma),
2057 				upper_32_bits(preq->request.dma), length_field,
2058 				field | ep_ring->cycle_state |
2059 				TRB_SETUPID(pdev->setup_id) |
2060 				pdev->setup_speed);
2061 
2062 		if (zlp) {
2063 			field = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
2064 
2065 			if (!pdev->ep0_expect_in)
2066 				field |= TRB_ISP;
2067 
2068 			cdnsp_queue_trb(pdev, ep_ring, true,
2069 					lower_32_bits(preq->request.dma),
2070 					upper_32_bits(preq->request.dma), 0,
2071 					field | ep_ring->cycle_state |
2072 					TRB_SETUPID(pdev->setup_id) |
2073 					pdev->setup_speed);
2074 		}
2075 
2076 		pdev->ep0_stage = CDNSP_DATA_STAGE;
2077 	}
2078 
2079 	/* Save the DMA address of the last TRB in the TD. */
2080 	preq->td.last_trb = ep_ring->enqueue;
2081 
2082 	/* Queue status TRB. */
2083 	if (preq->request.length == 0)
2084 		field = ep_ring->cycle_state;
2085 	else
2086 		field = (ep_ring->cycle_state ^ 1);
2087 
2088 	if (preq->request.length > 0 && pdev->ep0_expect_in)
2089 		field |= TRB_DIR_IN;
2090 
2091 	if (pep->ep_state & EP0_HALTED_STATUS) {
2092 		pep->ep_state &= ~EP0_HALTED_STATUS;
2093 		field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
2094 	} else {
2095 		field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
2096 	}
2097 
2098 	cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
2099 			field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
2100 			TRB_TYPE(TRB_STATUS) | pdev->setup_speed);
2101 
2102 	cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);
2103 
2104 	return 0;
2105 }
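
/*
 * Illustrative flow: a zero-length control request skips the data stage
 * and queues a single status TRB (TRB_STATUS | TRB_IOC), reporting
 * TRB_SETUPSTAT_ACK, or TRB_SETUPSTAT_STALL when EP0 was halted, before
 * ringing the endpoint doorbell.
 */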
2106 
2107 int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
2108 {
2109 	u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
2110 	int ret = 0;
2111 
2112 	if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED ||
2113 	    ep_state == EP_STATE_HALTED) {
2114 		trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
2115 		goto ep_stopped;
2116 	}
2117 
2118 	cdnsp_queue_stop_endpoint(pdev, pep->idx);
2119 	cdnsp_ring_cmd_db(pdev);
2120 	ret = cdnsp_wait_for_cmd_compl(pdev);
2121 
2122 	trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);
2123 
2124 ep_stopped:
2125 	pep->ep_state |= EP_STOPPED;
2126 	return ret;
2127 }
2128 
2129 /*
2130  * The transfer burst count field of the isochronous TRB defines the number of
2131  * bursts that are required to move all packets in this TD. Only SuperSpeed
2132  * devices can burst up to bMaxBurst number of packets per service interval.
2133  * This field is zero based, meaning a value of zero in the field means one
2134  * burst. Basically, for everything but SuperSpeed devices, this field will be
2135  * zero.
2136  */
2137 static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
2138 					  struct cdnsp_request *preq,
2139 					  unsigned int total_packet_count)
2140 {
2141 	unsigned int max_burst;
2142 
2143 	if (pdev->gadget.speed < USB_SPEED_SUPER)
2144 		return 0;
2145 
2146 	max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
2147 	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
2148 }
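
/*
 * Worked example (illustrative): a SuperSpeed endpoint with bMaxBurst =
 * 3 (four packets per burst) moving 10 packets needs
 * DIV_ROUND_UP(10, 4) = 3 bursts, encoded zero-based as TBC = 2.
 */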
2149 
2150 /*
2151  * Returns the number of packets in the last "burst" of packets. This field is
2152  * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
2153  * the last burst packet count is equal to the total number of packets in the
2154  * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
2155  * must contain (bMaxBurst + 1) number of packets, but the last burst can
2156  * contain 1 to (bMaxBurst + 1) packets.
2157  */
2158 static unsigned int
2159 	cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
2160 					  struct cdnsp_request *preq,
2161 					  unsigned int total_packet_count)
2162 {
2163 	unsigned int max_burst;
2164 	unsigned int residue;
2165 
2166 	if (pdev->gadget.speed >= USB_SPEED_SUPER) {
2167 		/* bMaxBurst is zero based: 0 means 1 packet per burst. */
2168 		max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
2169 		residue = total_packet_count % (max_burst + 1);
2170 
2171 		/*
2172 		 * If residue is zero, the last burst contains (max_burst + 1)
2173 		 * number of packets, but the TLBPC field is zero-based.
2174 		 */
2175 		if (residue == 0)
2176 			return max_burst;
2177 
2178 		return residue - 1;
2179 	}
2180 	if (total_packet_count == 0)
2181 		return 0;
2182 
2183 	return total_packet_count - 1;
2184 }
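
/*
 * Worked example (illustrative): with bMaxBurst = 3 and 10 total
 * packets, residue = 10 % 4 = 2, so the last burst carries two packets
 * and TLBPC = 1. With exactly 8 packets the residue is 0, the last
 * burst is full, and TLBPC = bMaxBurst = 3.
 */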
2185 
2186 /* Queue an isoc transfer. */
2187 int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
2188 			struct cdnsp_request *preq)
2189 {
2190 	unsigned int trb_buff_len, td_len, td_remain_len, block_len;
2191 	unsigned int burst_count, last_burst_pkt;
2192 	unsigned int total_pkt_count, max_pkt;
2193 	struct cdnsp_generic_trb *start_trb;
2194 	struct scatterlist *sg = NULL;
2195 	bool more_trbs_coming = true;
2196 	struct cdnsp_ring *ep_ring;
2197 	unsigned int num_sgs = 0;
2198 	int running_total = 0;
2199 	u32 field, length_field;
2200 	u64 addr, send_addr;
2201 	int start_cycle;
2202 	int trbs_per_td;
2203 	int i, sent_len, ret;
2204 
2205 	ep_ring = preq->pep->ring;
2206 
2207 	td_len = preq->request.length;
2208 
2209 	if (preq->request.num_sgs) {
2210 		num_sgs = preq->request.num_sgs;
2211 		sg = preq->request.sg;
2212 		addr = (u64)sg_dma_address(sg);
2213 		block_len = sg_dma_len(sg);
2214 		trbs_per_td = count_sg_trbs_needed(preq);
2215 	} else {
2216 		addr = (u64)preq->request.dma;
2217 		block_len = td_len;
2218 		trbs_per_td = count_trbs_needed(preq);
2219 	}
2220 
2221 	ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
2222 	if (ret)
2223 		return ret;
2224 
2225 	start_trb = &ep_ring->enqueue->generic;
2226 	start_cycle = ep_ring->cycle_state;
2227 	td_remain_len = td_len;
2228 	send_addr = addr;
2229 
2230 	max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
2231 	total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
2232 
2233 	/* A zero-length transfer still involves at least one packet. */
2234 	if (total_pkt_count == 0)
2235 		total_pkt_count++;
2236 
2237 	burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
2238 	last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
2239 							   total_pkt_count);
2240 
2241 	/*
2242 	 * Set isoc specific data for the first TRB in a TD.
2243 	 * Prevent HW from getting the TRBs by keeping the cycle state
2244 	 * inverted in the TD's first isoc TRB.
2245 	 */
2246 	field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
2247 		TRB_SIA | TRB_TBC(burst_count);
2248 
2249 	if (!start_cycle)
2250 		field |= TRB_CYCLE;
2251 
2252 	/* Fill the rest of the TRB fields, and remaining normal TRBs. */
2253 	for (i = 0; i < trbs_per_td; i++) {
2254 		u32 remainder;
2255 
2256 		/* Calculate TRB length. */
2257 		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
2258 		trb_buff_len = min(trb_buff_len, block_len);
2259 		if (trb_buff_len > td_remain_len)
2260 			trb_buff_len = td_remain_len;
2261 
2262 		/* Set the TRB length, TD size, & interrupter fields. */
2263 		remainder = cdnsp_td_remainder(pdev, running_total,
2264 					       trb_buff_len, td_len, preq,
2265 					       more_trbs_coming, 0);
2266 
2267 		length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
2268 
2269 		/* Only first TRB is isoc; it uses the TD size field for TBC. */
2270 		if (i) {
2271 			field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
2272 			length_field |= TRB_TD_SIZE(remainder);
2273 		} else {
2274 			length_field |= TRB_TD_SIZE_TBC(burst_count);
2275 		}
2277 
2278 		/* Only set interrupt on short packet for OUT EPs. */
2279 		if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
2280 			field |= TRB_ISP;
2281 
2282 		/* Set the chain bit for all except the last TRB. */
2283 		if (i < trbs_per_td - 1) {
2284 			more_trbs_coming = true;
2285 			field |= TRB_CHAIN;
2286 		} else {
2287 			more_trbs_coming = false;
2288 			preq->td.last_trb = ep_ring->enqueue;
2289 			field |= TRB_IOC;
2290 		}
2291 
2292 		cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
2293 				lower_32_bits(send_addr), upper_32_bits(send_addr),
2294 				length_field, field);
2295 
2296 		running_total += trb_buff_len;
2297 		addr += trb_buff_len;
2298 		td_remain_len -= trb_buff_len;
2299 
2300 		sent_len = trb_buff_len;
2301 		while (sg && sent_len >= block_len) {
2302 			/* New sg entry */
2303 			--num_sgs;
2304 			sent_len -= block_len;
2305 			if (num_sgs != 0) {
2306 				sg = sg_next(sg);
2307 				block_len = sg_dma_len(sg);
2308 				addr = (u64)sg_dma_address(sg);
2309 				addr += sent_len;
2310 			}
2311 		}
2312 		block_len -= sent_len;
2313 		send_addr = addr;
2314 	}
2315 
2316 	/* Check TD length */
2317 	if (running_total != td_len) {
2318 		dev_err(pdev->dev, "ISOC TD length mismatch\n");
2319 		ret = -EINVAL;
2320 		goto cleanup;
2321 	}
2322 
2323 	cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
2324 				 start_cycle, start_trb);
2325 
2326 	return 0;
2327 
2328 cleanup:
2329 	/* Clean up a partially enqueued isoc transfer. */
2330 	list_del_init(&preq->td.td_list);
2331 	ep_ring->num_tds--;
2332 
2333 	/*
2334 	 * Use the first TD as a temporary variable to turn the TDs we've
2335 	 * queued into No-ops with a software-owned cycle bit.
2336 	 * That way the hardware won't accidentally start executing bogus TDs
2337 	 * when we partially overwrite them.
2338 	 * td->first_trb and td->start_seg are already set.
2339 	 */
2340 	preq->td.last_trb = ep_ring->enqueue;
2341 	/* Every TRB except the first & last will have its cycle bit flipped. */
2342 	cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
2343 
2344 	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
2345 	ep_ring->enqueue = preq->td.first_trb;
2346 	ep_ring->enq_seg = preq->td.start_seg;
2347 	ep_ring->cycle_state = start_cycle;
2348 	return ret;
2349 }
2350 
2351 /****		Command Ring Operations		****/
2352 /*
2353  * Generic function for queuing a command TRB on the command ring.
2354  * The driver queues only one command on the ring at a time.
2355  */
2356 static void cdnsp_queue_command(struct cdnsp_device *pdev,
2357 				u32 field1,
2358 				u32 field2,
2359 				u32 field3,
2360 				u32 field4)
2361 {
2362 	cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
2363 			   GFP_ATOMIC);
2364 
2365 	pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
2366 
2367 	cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
2368 			field3, field4 | pdev->cmd_ring->cycle_state);
2369 }
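
/*
 * Typical call sequence, as in cdnsp_cmd_stop_ep() above: queue the
 * command, ring the command doorbell, then wait for the completion
 * event:
 *
 *	cdnsp_queue_stop_endpoint(pdev, pep->idx);
 *	cdnsp_ring_cmd_db(pdev);
 *	ret = cdnsp_wait_for_cmd_compl(pdev);
 */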
2370 
2371 /* Queue a slot enable or disable request on the command ring */
2372 void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
2373 {
2374 	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
2375 			    SLOT_ID_FOR_TRB(pdev->slot_id));
2376 }
2377 
2378 /* Queue an address device command TRB */
2379 void cdnsp_queue_address_device(struct cdnsp_device *pdev,
2380 				dma_addr_t in_ctx_ptr,
2381 				enum cdnsp_setup_dev setup)
2382 {
2383 	cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
2384 			    upper_32_bits(in_ctx_ptr), 0,
2385 			    TRB_TYPE(TRB_ADDR_DEV) |
2386 			    SLOT_ID_FOR_TRB(pdev->slot_id) |
2387 			    (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
2388 }
2389 
2390 /* Queue a reset device command TRB */
2391 void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
2392 {
2393 	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
2394 			    SLOT_ID_FOR_TRB(pdev->slot_id));
2395 }
2396 
2397 /* Queue a configure endpoint command TRB */
2398 void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
2399 				    dma_addr_t in_ctx_ptr)
2400 {
2401 	cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
2402 			    upper_32_bits(in_ctx_ptr), 0,
2403 			    TRB_TYPE(TRB_CONFIG_EP) |
2404 			    SLOT_ID_FOR_TRB(pdev->slot_id));
2405 }
2406 
2407 /*
2408  * Queue a "Stop Endpoint" command on the command ring to stop activity on
2409  * an endpoint, e.g. one that is about to be suspended.
2410  */
2411 void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
2412 {
2413 	cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
2414 			    EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
2415 }
2416 
2417 /* Set Transfer Ring Dequeue Pointer command. */
2418 void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
2419 				   struct cdnsp_ep *pep,
2420 				   struct cdnsp_dequeue_state *deq_state)
2421 {
2422 	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
2423 	u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
2424 	u32 type = TRB_TYPE(TRB_SET_DEQ);
2425 	u32 trb_sct = 0;
2426 	dma_addr_t addr;
2427 
2428 	addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
2429 				     deq_state->new_deq_ptr);
2430 
2431 	if (deq_state->stream_id)
2432 		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
2433 
2434 	cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
2435 			    deq_state->new_cycle_state, upper_32_bits(addr),
2436 			    trb_stream_id, trb_slot_id |
2437 			    EP_ID_FOR_TRB(pep->idx) | type);
2438 }
2439 
2440 void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
2441 {
2442 	cdnsp_queue_command(pdev, 0, 0, 0,
2443 				   SLOT_ID_FOR_TRB(pdev->slot_id) |
2444 				   EP_ID_FOR_TRB(ep_index) |
2445 				   TRB_TYPE(TRB_RESET_EP));
2446 }
2447 
2448 /*
2449  * Queue a halt endpoint request on the command ring.
2450  */
2451 void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
2452 {
2453 	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
2454 			    SLOT_ID_FOR_TRB(pdev->slot_id) |
2455 			    EP_ID_FOR_TRB(ep_index));
2456 }
2457 
2458 void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
2459 {
2460 	u32 lo, mid;
2461 
2462 	lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
2463 	     TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
2464 	mid = TRB_FH_TR_PACKET_DEV_NOT |
2465 	      TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
2466 	      TRB_FH_TO_INTERFACE(intf_num);
2467 
2468 	cdnsp_queue_command(pdev, lo, mid, 0,
2469 			    TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
2470 }
2471