// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload from software the
 * effort of servicing DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors (BD),
 * SW links the data buffers and triggers the QMU to send data to / receive
 * data from the host in a single batch.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */
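
/*
 * Note on the GPD ring used below: each endpoint owns a ring of MAX_GPD_NUM
 * GPDs linked through @next_gpd. SW prepares requests at @enqueue and
 * completes them at @dequeue; the HWO bit hands a GPD over to HW, and one
 * GPD is always kept SW-owned so that a full ring can be distinguished from
 * an empty one (see gpd_ring_empty()).
 */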

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#include "mtu3_trace.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_ZLP	BIT(6)
#define GPD_FLAGS_IOC	BIT(7)
#define GET_GPD_HWO(gpd)	(le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)

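/*
 * Two GPD field layouts exist: the *_OG macros cover the older IP, the
 * *_EL ones the layout used when mtu->gen2cp is set, which widens the
 * length fields from 16 to 20 bits.
 */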
#define GPD_RX_BUF_LEN_OG(x)	(((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x)	(((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
})

#define GPD_DATA_LEN_OG(x)	((x) & 0xffff)
#define GPD_DATA_LEN_EL(x)	((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})

#define GPD_EXT_FLAG_ZLP	BIT(29)
#define GPD_EXT_NGP_OG(x)	(((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x)	(((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x)	(((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x)	(((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
})

#define GPD_EXT_BUF(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
})

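/*
 * QMU registers hold a DMA address as a low 32-bit word plus the high
 * bits kept in the corresponding *HIAR register.
 */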
#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))

static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}

static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
		gpd_ring_init(ring, gpd);
	}
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all GPDs by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool,
			ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}

void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
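	/* kick the doorbell again if the queue did not become active */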
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

/* @dequeue may be NULL if the ring is unallocated or freed */
static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

/* check if the ring is full, i.e. no free GPD is left to enqueue */
static bool gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}

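/* a nonzero return means the ring has no free GPD for a new request */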
int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}

static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

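	/* keep the next GPD SW-owned (HWO cleared) so HW stops at it */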
	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw0_info = cpu_to_le32(ext_addr);

	if (req->zero) {
		if (mtu->gen2cp)
			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
		else
			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
	}

	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw3_info = cpu_to_le32(ext_addr);
	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}

int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		/* send zero length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
				QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to the next GPD when a ZLP is received */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
				QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	if (mep->is_in)
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
			!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	/* flush fifo again to make sure the fifo is empty */
	if (mep->is_in)
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop QMU */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}

/*
 * QMU can't transfer a zero-length packet directly (a hardware limit on
 * old SoCs), so when a ZLP has to be sent, we intentionally trigger a
 * length error interrupt and let the ISR send the ZLP via the BMU.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

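	/*
	 * proceed only for the ZLP queued on purpose: a request with data
	 * means a real length error
	 */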
	mreq = next_request(mep);
	if (mreq && mreq->request.length != 0)
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
	trace_mtu3_zlp_exp_gpd(mep, gpd_current);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
		return;
	}
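	/*
	 * with DMA disabled and the FIFO drained, TXPKTRDY lets the BMU
	 * send the ZLP
	 */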
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	/* bypass the current GPD */
	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}

/*
 * When an RX error happens (except zlperr), the QMU stops, and RQCPR saves
 * the GPD that encountered the error; a Done irq will arise after the QMU
 * is resumed.
 */
static void qmu_error_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd_current = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mtu->mac_base, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	mreq = next_request(mep);
	if (!mreq || mreq->gpd != gpd_current) {
		dev_err(mtu->dev, "no correct RX req is found\n");
		return;
	}

	mreq->request.status = -EAGAIN;

	/* bypass the current GPD */
	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
	mtu3_qmu_resume(mep);

	dev_dbg(mtu->dev, "%s EP%d, current=%p, req=%p\n",
		__func__, epnum, gpd_current, mreq);
}

/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet)-->
 * queue_tx --> process_tasklet(meanwhile, the second one is transferred,
 * tasklet processes both of them)--> qmu_interrupt for the second one.
 * To avoid the case above, qmu_done_tx is called directly from the ISR.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/* translate the QMU register's physical address to a virtual one */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

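	/*
	 * complete all SW-owned GPDs up to, but not including, the one
	 * HW is working on
	 */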
	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
		mreq = next_request(mep);
		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
		mreq = next_request(mep);
		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}

		req = &mreq->request;
		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

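	/* EP0 does not use the QMU, so per-EP interrupts start from EP1 */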
	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}

static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		mtu3_writel(mbase, U3D_RQERRIR0, errval);

		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);

			if (errval & (QMU_RX_CS_ERR(i) | QMU_RX_LEN_ERR(i)))
				qmu_error_rx(mtu, i);
		}
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}

irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is updated (cleared) on read */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);
	trace_mtu3_qmu_isr(qmu_done_status, qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}

int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

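	/* each pool block holds one full GPD ring, aligned to the GPD size */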
	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}