// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload SW from
 * servicing DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors
 * (BD), SW links data buffers and triggers the QMU to send data to /
 * receive data from the host.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#include "mtu3_trace.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_ZLP	BIT(6)
#define GPD_FLAGS_IOC	BIT(7)
#define GET_GPD_HWO(gpd)	(le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)

#define GPD_RX_BUF_LEN_OG(x)	(((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x)	(((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
})

#define GPD_DATA_LEN_OG(x)	((x) & 0xffff)
#define GPD_DATA_LEN_EL(x)	((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})

#define GPD_EXT_FLAG_ZLP	BIT(29)
#define GPD_EXT_NGP_OG(x)	(((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x)	(((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x)	(((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x)	(((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
})

#define GPD_EXT_BUF(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
})
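
/*
 * Worked example (illustrative, not from the programming guide): for a
 * 36-bit DMA address such as 0x1_2345_6780, the low 32 bits (0x23456780)
 * are stored in gpd->buffer or gpd->next_gpd, while the upper 4 bits
 * (0x1) are packed by GPD_EXT_BUF()/GPD_EXT_NGP() above: on gen2cp
 * controllers GPD_EXT_BUF_EL(0x1) = 0x1 << 24 = 0x01000000, and on
 * older ones GPD_EXT_BUF_OG(0x1) = 0x1 << 16 = 0x00010000.
 */
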
#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))

static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}

static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
		gpd_ring_init(ring, gpd);
	}
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all GPDs by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool,
			ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}

void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}
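
/*
 * Illustration of the GPD ring (a sketch, not from the programming
 * guide): the ring is a fixed array of MAX_GPD_NUM descriptors whose
 * enqueue/dequeue pointers wrap from ->end back to ->start. One GPD is
 * always kept free, so gpd_ring_empty() below actually reports "no free
 * GPD left for a new request" rather than "nothing pending":
 *
 *	start                                 end
 *	+------+------+------+- ... -+------+
 *	| GPD0 | GPD1 | GPD2 |       | GPDn |
 *	+------+------+------+- ... -+------+
 *	    ^dequeue     ^enqueue
 *
 * dequeue chases enqueue as the hardware completes GPDs (HWO cleared).
 */
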
static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

/* check if a ring is empty */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}

static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw0_info = cpu_to_le32(ext_addr);

	if (req->zero) {
		if (mtu->gen2cp)
			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
		else
			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
	}

	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw3_info = cpu_to_le32(ext_addr);
	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}
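
/*
 * A minimal sketch of how the helpers here are typically driven from the
 * gadget's ep_queue path (simplified; the real call sequence, including
 * locking and request bookkeeping, lives in mtu3_gadget.c):
 *
 *	if (mtu3_prepare_transfer(mep))
 *		return -EAGAIN;		   <-- no free GPD in the ring
 *	mtu3_insert_gpd(mep, mreq);	   <-- fill a GPD and set HWO
 *	mtu3_qmu_resume(mep);		   <-- kick the queue if it stopped
 */
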
void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}

int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		/* send zero length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
				QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to next GPD when receive ZLP */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
				QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	if (mep->is_in)
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
			!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	/* flush fifo again to make sure the fifo is empty */
	if (mep->is_in)
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop QMU */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}
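
/*
 * Rough lifecycle of an endpoint's QMU, as driven by the gadget layer
 * (an illustrative sketch; the exact call sites are in mtu3_gadget.c):
 *
 *	mtu3_gpd_ring_alloc(mep);	allocate the GPD ring (ep_enable)
 *	mtu3_qmu_start(mep);		program start address, Q_START
 *	...				insert GPDs / resume per request
 *	mtu3_qmu_flush(mep);		stop the queue and reset the ring
 *	mtu3_gpd_ring_free(mep);	release the ring (ep_disable)
 */
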
/*
 * The QMU can't transfer a zero-length packet directly (a hardware limit
 * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger
 * a length error interrupt and send the ZLP by the BMU in the ISR.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length != 0)
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
	trace_mtu3_zlp_exp_gpd(mep, gpd_current);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty failed\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	/* bypass the current GPD */
	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}
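
/*
 * In short, the ZLP workaround above works as follows (a summary of the
 * handler, assuming a request with req->length == 0 on an old SoC):
 * 1. the zero-length GPD makes the QMU raise a TX length error
 *    interrupt (enabled via U3D_TQERRIESR0 in mtu3_qmu_start());
 * 2. the handler turns off DMA mode, waits for the FIFO to drain, and
 *    sets TX_TXPKTRDY so the BMU sends the ZLP itself;
 * 3. the GPD is marked BPS (bypass) + HWO, so the QMU skips it after
 *    DMA mode is re-enabled and the queue is resumed.
 */
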
/*
 * NOTE: the request list may already be empty, as in the following case:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet)-->
 * queue_tx --> process_tasklet(meanwhile, the second one is transferred,
 * tasklet processes both of them)-->qmu_interrupt for the second one.
 * To avoid the case above, call qmu_done_tx in the ISR directly.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/* translate the physical address read from the QMU register into a virtual one */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}
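
/*
 * Note on the status layout (inferred from the dev_dbg() in
 * mtu3_qmu_isr() below, not stated in the programming guide): in
 * U3D_QISAR0 the TX-done bits sit in the low 16 bits and the RX-done
 * bits in the high 16 bits, one bit per endpoint, which is what
 * QMU_TX_DONE_INT()/QMU_RX_DONE_INT() decode here.
 */
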
static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}

irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read-update: reading it also clears the status */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);
	trace_mtu3_qmu_isr(qmu_done_status, qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}

int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}