// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver - gadget side.
 *
 * Copyright (C) 2018-2019 Cadence Design Systems.
 * Copyright (C) 2017-2018 NXP
 *
 * Authors: Pawel Jez <pjez@cadence.com>,
 *          Pawel Laszczak <pawell@cadence.com>
 *          Peter Chen <peter.chen@nxp.com>
 */

/*
 * Work around 1:
 * In some situations, the controller may get a stale data address in a TRB
 * with the following sequence:
 * 1. Controller reads a TRB that includes a data address
 * 2. Software updates TRBs, including the data address and the Cycle bit
 * 3. Controller reads the TRB again, which includes the Cycle bit
 * 4. DMA runs with the stale data address
 *
 * To fix this problem, the driver must mark the first TRB in a TD as invalid.
 * After preparing all TRBs, the driver must check the DMA position; if DMA
 * points at the first just-added TRB and the doorbell is set, the driver must
 * defer making this TRB valid. The TRB is made valid while adding the next
 * TRB, but only if DMA is stopped, or at the TRBERR interrupt.
 *
 * The issue is fixed in the DEV_VER_V3 version of the controller.
 *
 * Work around 2:
 * For OUT endpoints the controller uses on-chip buffers shared by all
 * incoming packets, including ep0out. It is a FIFO buffer, so packets must be
 * handled by DMA in the correct order. If the first packet in the buffer is
 * not handled, the following packets directed to other endpoints and
 * functions are blocked.
 * Additionally, packets directed to one endpoint can block the entire on-chip
 * buffer. In this case transfers to other endpoints are blocked as well.
 *
 * To resolve this issue, after raising the descriptor-missing interrupt the
 * driver prepares an internal usb_request object and uses it to arm a DMA
 * transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set with
 * the CDNS3_WA2_NUM_BUFFERS macro.
 *
 * Such a blocking situation was observed with the ACM gadget. For this
 * function the host sends an OUT data packet but the ACM function is not
 * prepared for it. This causes the buffer held in on-chip memory to block
 * transfers to other endpoints.
 *
 * The issue is fixed in the DEV_VER_V2 version of the controller.
 */

#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "core.h"
#include "gadget-export.h"
#include "cdns3-gadget.h"
#include "cdns3-trace.h"
#include "drd.h"

static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags);

static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request);

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request);

/**
 * cdns3_clear_register_bit - clear bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to clear
 */
static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}
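/*
 * Illustrative usage sketch (not from the original driver):
 * cdns3_clear_register_bit() above and cdns3_set_register_bit() below both
 * perform a plain read-modify-write of a device register, e.g.:
 *
 *	cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
 *	cdns3_clear_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
 */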
/**
 * cdns3_set_register_bit - set bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to set
 */
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}

/**
 * cdns3_ep_addr_to_index - converts endpoint address to the
 * index of the endpoint object in the cdns3_device.eps[] container
 * @ep_addr: endpoint address for which endpoint object is required
 *
 */
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
}
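/*
 * Worked example for the mapping above: OUT endpoints occupy indexes 0-15
 * and IN endpoints 16-31, so ep1out (address 0x01) maps to index 1 and
 * ep1in (address 0x81) maps to index 17.
 */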
static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
			     struct cdns3_endpoint *priv_ep)
{
	int dma_index;

	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;

	return dma_index / TRB_SIZE;
}

/**
 * cdns3_next_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct usb_request *cdns3_next_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct usb_request, list);
}

/**
 * cdns3_next_align_buf - returns next buffer from list
 * @list: list containing buffers
 *
 * Returns buffer or NULL if no buffers in list
 */
static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}

/**
 * cdns3_next_priv_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_request, list);
}

/**
 * cdns3_select_ep - selects endpoint
 * @priv_dev: extended gadget object
 * @ep: endpoint address
 */
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
{
	if (priv_dev->selected_ep == ep)
		return;

	priv_dev->selected_ep = ep;
	writel(ep, &priv_dev->regs->ep_sel);
}

/**
 * cdns3_get_tdl - gets current tdl for selected endpoint.
 * @priv_dev: extended gadget object
 *
 * Before calling this function the appropriate endpoint must
 * be selected by means of the cdns3_select_ep function.
 */
static int cdns3_get_tdl(struct cdns3_device *priv_dev)
{
	if (priv_dev->dev_ver < DEV_VER_V3)
		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
	else
		return readl(&priv_dev->regs->ep_tdl);
}

dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
				 struct cdns3_trb *trb)
{
	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;

	return priv_ep->trb_pool_dma + offset;
}

static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_ep->trb_pool) {
		dma_pool_free(priv_dev->eps_dma_pool,
			      priv_ep->trb_pool, priv_ep->trb_pool_dma);
		priv_ep->trb_pool = NULL;
	}
}

/**
 * cdns3_allocate_trb_pool - Allocates TRB's pool for selected endpoint
 * @priv_ep: endpoint object
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = TRB_RING_SIZE;
	int num_trbs = ring_size / TRB_SIZE;
	struct cdns3_trb *link_trb;

	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
		cdns3_free_trb_pool(priv_ep);

	if (!priv_ep->trb_pool) {
		priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
						   GFP_ATOMIC,
						   &priv_ep->trb_pool_dma);

		if (!priv_ep->trb_pool)
			return -ENOMEM;

		priv_ep->alloc_ring_size = ring_size;
	}

	memset(priv_ep->trb_pool, 0, ring_size);

	priv_ep->num_trbs = num_trbs;

	if (!priv_ep->num)
		return 0;

	/* Initialize the last TRB as Link TRB */
	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));

	if (priv_ep->use_streams) {
		/*
		 * For stream capable endpoints the driver uses a single
		 * correct TRB. The last TRB has a zeroed cycle bit.
		 */
		link_trb->control = 0;
	} else {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
	}
	return 0;
}
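/*
 * Ring layout note (added for clarity): with num_trbs entries, slots
 * 0 .. num_trbs - 2 hold transfer TRBs while slot num_trbs - 1 holds the
 * Link TRB pointing back to the start of the pool, with TRB_TOGGLE flipping
 * the cycle state on every wrap.
 */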
/**
 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
 * @priv_ep: endpoint object
 *
 * Endpoint must be selected before call to this function
 */
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int val;

	trace_cdns3_halt(priv_ep, 1, 1);

	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
	       &priv_dev->regs->ep_cmd);

	/* wait for DFLUSH cleared */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
				  !(val & EP_CMD_DFLUSH), 1, 1000);
	priv_ep->flags |= EP_STALLED;
	priv_ep->flags &= ~EP_STALL_PENDING;
}

/**
 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
 * @priv_dev: extended gadget object
 */
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
	int i;

	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);

	cdns3_allow_enable_l1(priv_dev, 0);
	priv_dev->hw_configured_flag = 0;
	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->wait_for_setup = 0;
	priv_dev->using_streams = 0;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i])
			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}

/**
 * cdns3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 * @cs: Cycle state
 * @trb_in_seg: number of TRBs in segment
 *
 * The index should never point to the link TRB. After incrementing, if the
 * index would point to the link TRB, wrap around to the beginning and flip
 * the cycle state bit. The link TRB is always the last TRB entry.
 */
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		*index = 0;
		*cs ^= 1;
	}
}

/**
 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
 * @priv_ep: The endpoint whose enqueue pointer we're incrementing
 */
static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs--;
	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
}

/**
 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
 * @priv_ep: The endpoint whose dequeue pointer we're incrementing
 */
static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs++;
	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}
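/*
 * Worked example for the increment helpers above, assuming an illustrative
 * num_trbs == 40: the link TRB sits at index 39, so an index of 38 wraps to
 * 0 and the producer (pcs) or consumer (ccs) cycle state flips; free_trbs
 * tracks how many transfer slots remain between enqueue and dequeue.
 */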
/**
 * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
 * @priv_dev: Extended gadget object
 * @enable: Enable/disable permit to transition to L1.
 *
 * If the USB_CONF_L1EN bit is set and the device receives an Extended Token
 * packet, the controller answers with an ACK handshake.
 * If the USB_CONF_L1DS bit is set and the device receives an Extended Token
 * packet, the controller answers with a NYET handshake.
 */
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{
	if (enable)
		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
	else
		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}

enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
{
	u32 reg;

	reg = readl(&priv_dev->regs->usb_sts);

	if (DEV_SUPERSPEED(reg))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(reg))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(reg))
		return USB_SPEED_FULL;
	else if (DEV_LOWSPEED(reg))
		return USB_SPEED_LOW;
	return USB_SPEED_UNKNOWN;
}

/**
 * cdns3_start_all_request - add all not-started requests to the ring
 * @priv_dev: Extended gadget object
 * @priv_ep: The endpoint for which requests will be started.
 *
 * Returns -ENOBUFS if the transfer ring does not have enough TRBs to start
 * all requests.
 */
static int cdns3_start_all_request(struct cdns3_device *priv_dev,
				   struct cdns3_endpoint *priv_ep)
{
	struct usb_request *request;
	int ret = 0;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/*
	 * If the last pending transfer is INTERNAL,
	 * OR streams are enabled for this endpoint,
	 * do NOT start a new transfer while the last one is still pending.
	 */
	if (!pending_empty) {
		struct cdns3_request *priv_req;

		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);
		if ((priv_req->flags & REQUEST_INTERNAL) ||
		    (priv_ep->flags & EP_TDLCHK_EN) ||
		    priv_ep->use_streams) {
			dev_dbg(priv_dev->dev, "Blocking external request\n");
			return ret;
		}
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		if (!priv_ep->use_streams) {
			ret = cdns3_ep_run_transfer(priv_ep, request);
		} else {
			priv_ep->stream_sg_idx = 0;
			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
		}
		if (ret)
			return ret;

		list_move_tail(&request->list, &priv_ep->pending_req_list);
		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
			break;
	}

	priv_ep->flags &= ~EP_RING_FULL;
	return ret;
}

/*
 * WA2: Set the flag for all non-ISOC OUT endpoints. If this flag is set,
 * the driver tries to detect whether the endpoint needs an additional
 * internal buffer to unblock the on-chip FIFO buffer. The flag is cleared
 * if DMA is armed before the first DESCMISS interrupt.
 */
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
		(reg) |= EP_STS_EN_DESCMISEN; \
	} } while (0)
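/*
 * Usage sketch for the macro above (it mirrors the call site in
 * cdns3_wa2_check_outq_status() later in this file): the caller reads
 * ep_sts_en, lets the macro set the DESCMISS enable bit for a non-ISOC OUT
 * endpoint, and writes the value back:
 *
 *	reg = readl(&priv_dev->regs->ep_sts_en);
 *	cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
 *	writel(reg, &priv_dev->regs->ep_sts_en);
 */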
static void __cdns3_descmiss_copy_data(struct usb_request *request,
				       struct usb_request *descmiss_req)
{
	int length = request->actual + descmiss_req->actual;
	struct scatterlist *s = request->sg;

	if (!s) {
		if (length <= request->length) {
			memcpy(&((u8 *)request->buf)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			/* This should never occur */
			request->status = -ENOMEM;
		}
	} else {
		if (length <= sg_dma_len(s)) {
			void *p = phys_to_virt(sg_dma_address(s));

			memcpy(&((u8 *)p)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			request->status = -ENOMEM;
		}
	}
}

/**
 * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
 * the request queued by the class driver.
 * @priv_ep: extended endpoint object
 * @request: request object
 */
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
					 struct usb_request *request)
{
	struct usb_request *descmiss_req;
	struct cdns3_request *descmiss_priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		int chunk_end;

		descmiss_priv_req =
			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		descmiss_req = &descmiss_priv_req->request;

		/* driver can't touch pending request */
		if (descmiss_priv_req->flags & REQUEST_PENDING)
			break;

		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
		request->status = descmiss_req->status;
		__cdns3_descmiss_copy_data(request, descmiss_req);
		list_del_init(&descmiss_priv_req->list);
		kfree(descmiss_req->buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
		--priv_ep->wa2_counter;

		if (!chunk_end)
			break;
	}
}
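/*
 * Chaining example for the copy loop above (a sketch of the intended flow):
 * if a transfer was captured in three internal requests r1, r2 and r3, then
 * r1 and r2 carry REQUEST_INTERNAL_CH ("chunk continues") while r3 does not,
 * so the loop copies r1, r2 and r3 in order and stops after the first
 * request without the chain flag.
 */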
static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
						     struct cdns3_endpoint *priv_ep,
						     struct cdns3_request *priv_req)
{
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
	    priv_req->flags & REQUEST_INTERNAL) {
		struct usb_request *req;

		req = cdns3_next_request(&priv_ep->deferred_req_list);

		priv_ep->descmis_req = NULL;

		if (!req)
			return NULL;

		/* unmap the gadget request before copying data */
		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
						priv_ep->dir);

		cdns3_wa2_descmiss_copy_data(priv_ep, req);
		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
		    req->length != req->actual) {
			/* wait for next part of transfer */
			/* re-map the gadget request buffer */
			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
						      usb_endpoint_dir_in(priv_ep->endpoint.desc));
			return NULL;
		}

		if (req->status == -EINPROGRESS)
			req->status = 0;

		list_del_init(&req->list);
		cdns3_start_all_request(priv_dev, priv_ep);
		return req;
	}

	return &priv_req->request;
}

static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep,
				     struct cdns3_request *priv_req)
{
	int deferred = 0;

	/*
	 * If a transfer was queued before DESCMISS appeared, the driver can
	 * disable handling of the DESCMISS interrupt. The driver assumes
	 * that it can disable the special treatment for this endpoint.
	 */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		u32 reg;

		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		reg = readl(&priv_dev->regs->ep_sts_en);
		reg &= ~EP_STS_EN_DESCMISEN;
		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
		writel(reg, &priv_dev->regs->ep_sts_en);
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);

		/*
		 * The DESCMISS transfer has finished, so data will be
		 * directly copied from the internally allocated usb_request
		 * objects.
		 */
		if (pending_empty && !descmiss_empty &&
		    !(priv_req->flags & REQUEST_INTERNAL)) {
			cdns3_wa2_descmiss_copy_data(priv_ep,
						     &priv_req->request);

			trace_cdns3_wa2(priv_ep, "get internal stored data");

			list_add_tail(&priv_req->request.list,
				      &priv_ep->pending_req_list);
			cdns3_gadget_giveback(priv_ep, priv_req,
					      priv_req->request.status);

			/*
			 * The driver intentionally returns a positive value
			 * as the correct value. It informs the caller that
			 * the transfer has already been finished.
			 */
			return EINPROGRESS;
		}

		/*
		 * The driver will wait for completion of the DESCMISS
		 * transfer before starting a new, non-DESCMISS transfer.
		 */
		if (!pending_empty && !descmiss_empty) {
			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
			deferred = 1;
		}

		if (priv_req->flags & REQUEST_INTERNAL)
			list_add_tail(&priv_req->list,
				      &priv_ep->wa2_descmiss_req_list);
	}

	return deferred;
}
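/*
 * Caller contract for cdns3_wa2_gadget_ep_queue() (noted for clarity; the
 * queue path itself is outside this excerpt): 0 means "queue normally",
 * 1 means "defer the request", and the positive EINPROGRESS means the
 * request was already completed from internally stored DESCMISS data.
 */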
static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		u8 chain;

		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);

		trace_cdns3_wa2(priv_ep, "removes eldest request");

		kfree(priv_req->request.buf);
		list_del_init(&priv_req->list);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		--priv_ep->wa2_counter;

		if (!chain)
			break;
	}
}

/**
 * cdns3_wa2_descmissing_packet - handles descriptor missing event.
 * @priv_ep: extended gadget object
 *
 * This function is used only for WA2. For more information see the
 * "Work around 2" description.
 */
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/* check for pending transfer */
	if (!pending_empty) {
		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
		return;
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
	}

	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");

	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
		cdns3_wa2_remove_old_request(priv_ep);
	}

	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
						GFP_ATOMIC);
	if (!request)
		goto err;

	priv_req = to_cdns3_request(request);
	priv_req->flags |= REQUEST_INTERNAL;

	/*
	 * If this field is still assigned, it indicates that the transfer
	 * related to that request has not been finished yet. In this case
	 * the driver simply allocates the next request and sets the
	 * REQUEST_INTERNAL_CH flag on the previous one, indicating that the
	 * current request is part of the previous one.
	 */
	if (priv_ep->descmis_req)
		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;

	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
					GFP_ATOMIC);
	priv_ep->wa2_counter++;

	if (!priv_req->request.buf) {
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
		goto err;
	}

	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
	priv_ep->descmis_req = priv_req;

	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
				&priv_ep->descmis_req->request,
				GFP_ATOMIC);

	return;

err:
	dev_err(priv_ep->cdns3_dev->dev,
		"Failed: No sufficient memory for DESCMIS\n");
}

static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
{
	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

	if (tdl) {
		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;

		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
		       &priv_dev->regs->ep_cmd);
	}
}
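/*
 * Arithmetic note for cdns3_wa2_reset_tdl() (an interpretation added for
 * clarity): the write above relies on the TDL counter wrapping at
 * EP_CMD_TDL_MAX + 1, so programming EP_CMD_TDL_MAX + 1 - tdl via STDL
 * brings the hardware counter back to zero.
 */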
static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
{
	u32 ep_sts_reg;

	/* select EP0-out */
	cdns3_select_ep(priv_dev, 0);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);

	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];

		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
			u8 pending_empty = list_empty(&outq_ep->pending_req_list);

			if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
			    (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
			    !pending_empty) {
			} else {
				u32 ep_sts_en_reg;
				u32 ep_cmd_reg;

				cdns3_select_ep(priv_dev, outq_ep->num |
						outq_ep->dir);
				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);

				outq_ep->flags |= EP_TDLCHK_EN;
				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
						       EP_CFG_TDL_CHK);

				cdns3_wa2_enable_detection(priv_dev, outq_ep,
							   ep_sts_en_reg);
				writel(ep_sts_en_reg,
				       &priv_dev->regs->ep_sts_en);
				/* reset tdl value to zero */
				cdns3_wa2_reset_tdl(priv_dev);
				/*
				 * Memory barrier - Reset tdl before ringing the
				 * doorbell.
				 */
				wmb();
				if (EP_CMD_DRDY & ep_cmd_reg) {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");

				} else {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
					/*
					 * ring doorbell to generate DESCMIS irq
					 */
					writel(EP_CMD_DRDY,
					       &priv_dev->regs->ep_cmd);
				}
			}
		}
	}
}

/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to whom the request belongs to
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	if (likely(!(priv_req->flags & REQUEST_UNALIGNED)))
		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
						priv_ep->dir);

	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					priv_req->aligned_buf->dma,
					request->actual,
					priv_req->aligned_buf->dir);
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->actual);
	}

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	/* All TRBs have finished, clear the counter */
	priv_req->finished_trb = 0;
	trace_cdns3_gadget_giveback(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	if (request->complete) {
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}

static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
	/* Work around for stale data address in TRB */
	if (priv_ep->wa1_set) {
		trace_cdns3_wa1(priv_ep, "restore cycle bit");

		priv_ep->wa1_set = 0;
		priv_ep->wa1_trb_index = 0xFFFF;
		if (priv_ep->wa1_cycle_bit) {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
		} else {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
		}
	}
}

static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
						     aligned_buf_wq);
	struct cdns3_aligned_buf *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);

	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
		if (!buf->in_use) {
			list_del(&buf->list);

			/*
			 * Re-enable interrupts to free DMA capable memory.
			 * Driver can't free this memory with disabled
			 * interrupts.
			 */
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			dma_free_noncoherent(priv_dev->sysdev, buf->size,
					     buf->buf, buf->dma, buf->dir);
			kfree(buf);
			spin_lock_irqsave(&priv_dev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
}
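/*
 * Note (added for clarity): the work item above runs from
 * system_freezable_wq; it is queued by cdns3_prepare_aligned_request_buf()
 * below whenever an old bounce buffer is retired, so the DMA memory is
 * freed outside of atomic context.
 */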
static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_aligned_buf *buf;

	/* check if buffer is aligned to 8. */
	if (!((uintptr_t)priv_req->request.buf & 0x7))
		return 0;

	buf = priv_req->aligned_buf;

	if (!buf || priv_req->request.length > buf->size) {
		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		buf->size = priv_req->request.length;
		buf->dir = usb_endpoint_dir_in(priv_ep->endpoint.desc) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

		buf->buf = dma_alloc_noncoherent(priv_dev->sysdev,
						 buf->size,
						 &buf->dma,
						 buf->dir,
						 GFP_ATOMIC);
		if (!buf->buf) {
			kfree(buf);
			return -ENOMEM;
		}

		if (priv_req->aligned_buf) {
			trace_cdns3_free_aligned_request(priv_req);
			priv_req->aligned_buf->in_use = 0;
			queue_work(system_freezable_wq,
				   &priv_dev->aligned_buf_wq);
		}

		buf->in_use = 1;
		priv_req->aligned_buf = buf;

		list_add_tail(&buf->list,
			      &priv_dev->aligned_buf_list);
	}

	if (priv_ep->dir == USB_DIR_IN) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					buf->dma, buf->size, buf->dir);
		memcpy(buf->buf, priv_req->request.buf,
		       priv_req->request.length);
	}

	/* Transfer DMA buffer ownership back to device */
	dma_sync_single_for_device(priv_dev->sysdev,
				   buf->dma, buf->size, buf->dir);

	priv_req->flags |= REQUEST_UNALIGNED;
	trace_cdns3_prepare_aligned_request(priv_req);

	return 0;
}
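/*
 * Bounce-buffer flow in short (added for clarity): if a class driver queues
 * a buffer that is not 8-byte aligned, the request is flagged
 * REQUEST_UNALIGNED and DMA uses the aligned copy instead; for OUT
 * transfers the data is copied back in cdns3_gadget_giveback() above.
 */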
static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
				  struct cdns3_trb *trb)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (!priv_ep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

		if (doorbell) {
			priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
			priv_ep->wa1_set = 1;
			priv_ep->wa1_trb = trb;
			priv_ep->wa1_trb_index = priv_ep->enqueue;
			trace_cdns3_wa1(priv_ep, "set guard");
			return 0;
		}
	}
	return 1;
}

static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
					     struct cdns3_endpoint *priv_ep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
	dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

	if (!doorbell || dma_index != priv_ep->wa1_trb_index)
		cdns3_wa1_restore_cycle_bit(priv_ep);
}

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	dma_addr_t trb_dma;
	int address;
	u32 control;
	u32 length;
	u32 tdl;
	unsigned int sg_idx = priv_ep->stream_sg_idx;

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	/* For stream capable endpoints the driver uses only a single TD. */
	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->end_trb = priv_req->start_trb;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;

	if (!request->num_sgs) {
		trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
		length = request->length;
	} else {
		trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
		length = request->sg[sg_idx].length;
	}

	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);

	trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));

	/*
	 * For the DEV_VER_V2 controller version we have enabled
	 * USB_CONF2_EN_TDL_TRB in the DMULT configuration.
	 * This enables TDL calculation based on the TRB, hence setting TDL
	 * in the TRB.
	 */
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
	}
	priv_req->flags |= REQUEST_PENDING;

	trb->control = cpu_to_le32(control);

	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/* always first element */
	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
	       &priv_dev->regs->ep_traddr);

	if (!(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);

		priv_ep->prime_flag = false;

		/*
		 * For controller version DEV_VER_V2 the tdl calculation
		 * is based on the TRB.
		 */

		if (priv_dev->dev_ver < DEV_VER_V2)
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		else if (priv_dev->dev_ver > DEV_VER_V2)
			writel(tdl, &priv_dev->regs->ep_tdl);

		priv_ep->last_stream_id = priv_req->request.stream_id;
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}
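/*
 * Worked example for the TDL computation in cdns3_ep_run_stream_transfer()
 * above: with request->length == 4096 and maxpacket == 1024,
 * tdl = DIV_ROUND_UP(4096, 1024) = 4 packets.
 */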
static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_dev->dev_ver < DEV_VER_V3)
		return;

	if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
		writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
	}
}

/**
 * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
 * @priv_ep: endpoint object
 * @request: request object
 *
 * Returns zero on success or negative value on failure
 */
static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	struct cdns3_trb *link_trb = NULL;
	dma_addr_t trb_dma;
	u32 togle_pcs = 1;
	int sg_iter = 0;
	int num_trb;
	int address;
	u32 control;
	int pcs;
	u16 total_tdl = 0;
	struct scatterlist *s = NULL;
	bool sg_supported = !!(request->num_mapped_sgs);

	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
		num_trb = priv_ep->interval;
	else
		num_trb = sg_supported ? request->num_mapped_sgs : 1;

	if (num_trb > priv_ep->free_trbs) {
		priv_ep->flags |= EP_RING_FULL;
		return -ENOBUFS;
	}

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	/* prepare ring */
	if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
		int doorbell, dma_index;
		u32 ch_bit = 0;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

		/* Driver can't update the LINK TRB while it is being processed. */
		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
			priv_ep->flags |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* update the Cycle bit in the Link TRB before starting DMA */
		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
		/*
		 * For TR size equal to 2, enabling TRB_CHAIN for epXin causes
		 * DMA to get stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout causes DMA to get stuck after handling the LINK TRB.
		 * To eliminate this strange behavior the driver sets the
		 * TRB_CHAIN bit only for TR size > 2.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
		    TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
	}

	if (priv_dev->dev_ver <= DEV_VER_V2)
		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

	if (sg_supported)
		s = request->sg;

	/* set incorrect Cycle Bit for first trb */
	control = priv_ep->pcs ? 0 : TRB_CYCLE;
	trb->length = 0;
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		u16 td_size;

		td_size = DIV_ROUND_UP(request->length,
				       priv_ep->endpoint.maxpacket);
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
		else
			control |= TRB_TDL_HS_SIZE(td_size);
	}

	do {
		u32 length;

		/* fill TRB */
		control |= TRB_TYPE(TRB_NORMAL);
		if (sg_supported) {
			trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
			length = sg_dma_len(s);
		} else {
			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
			length = request->length;
		}

		if (priv_ep->flags & EP_TDLCHK_EN)
			total_tdl += DIV_ROUND_UP(length,
						  priv_ep->endpoint.maxpacket);

		trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
					   TRB_LEN(length));
		pcs = priv_ep->pcs ? TRB_CYCLE : 0;

		/*
		 * The first TRB should be prepared last, to avoid processing
		 * the transfer too early.
		 */
		if (sg_iter != 0)
			control |= pcs;

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
			control |= TRB_IOC | TRB_ISP;
		} else {
			/* for last element in TD or in SG list */
			if (sg_iter == (num_trb - 1) && sg_iter != 0)
				control |= pcs | TRB_IOC | TRB_ISP;
		}

		if (sg_iter)
			trb->control = cpu_to_le32(control);
		else
			priv_req->trb->control = cpu_to_le32(control);

		if (sg_supported) {
			trb->control |= cpu_to_le32(TRB_ISP);
			/* Don't set chain bit for last TRB */
			if (sg_iter < num_trb - 1)
				trb->control |= cpu_to_le32(TRB_CHAIN);

			s = sg_next(s);
		}

		control = 0;
		++sg_iter;
		priv_req->end_trb = priv_ep->enqueue;
		cdns3_ep_inc_enq(priv_ep);
		trb = priv_ep->trb_pool + priv_ep->enqueue;
		trb->length = 0;
	} while (sg_iter < num_trb);

	trb = priv_req->trb;

	priv_req->flags |= REQUEST_PENDING;
	priv_req->num_of_trb = num_trb;

	if (sg_iter == 1)
		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);

	if (priv_dev->dev_ver < DEV_VER_V2 &&
	    (priv_ep->flags & EP_TDLCHK_EN)) {
		u16 tdl = total_tdl;
		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

		if (tdl > EP_CMD_TDL_MAX) {
			tdl = EP_CMD_TDL_MAX;
			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
		}

		if (old_tdl < tdl) {
			tdl -= old_tdl;
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		}
	}

	/*
	 * Memory barrier - the cycle bit must be set before the other fields
	 * in the TRB.
	 */
	wmb();

	/* give the TD to the consumer */
	if (togle_pcs)
		trb->control = trb->control ^ cpu_to_le32(1);

	if (priv_dev->dev_ver <= DEV_VER_V2)
		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);

	if (num_trb > 1) {
		int i = 0;

		while (i < num_trb) {
			trace_cdns3_prepare_trb(priv_ep, trb + i);
			if (trb + i == link_trb) {
				trb = priv_ep->trb_pool;
				num_trb = num_trb - i;
				i = 0;
			} else {
				i++;
			}
		}
	} else {
		trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
	}

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/*
	 * For DMULT mode we can set address to transfer ring only once after
	 * enabling endpoint.
	 */
	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
		/*
		 * Until SW is ready to handle the OUT transfer, the ISO OUT
		 * endpoint should stay disabled (EP_CFG.ENABLE = 0).
		 * EP_CFG_ENABLE must be set before updating ep_traddr.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
					       EP_CFG_ENABLE);
		}

		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
					priv_req->start_trb * TRB_SIZE),
					&priv_dev->regs->ep_traddr);

		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
	}

	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		cdns3_rearm_drdy_if_needed(priv_ep);
		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}
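/*
 * Handoff summary for cdns3_ep_run_transfer() above (added for clarity):
 * the first TRB is written with an inverted cycle bit and is flipped only
 * after the whole TD has been written, and only when no WA1 guard is armed
 * (see togle_pcs), so DMA cannot pick up a half-built TD.
 */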
void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
	struct cdns3_endpoint *priv_ep;
	struct usb_ep *ep;

	if (priv_dev->hw_configured_flag)
		return;

	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);

	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
			       USB_CONF_U1EN | USB_CONF_U2EN);

	priv_dev->hw_configured_flag = 1;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		if (ep->enabled) {
			priv_ep = ep_to_cdns3_ep(ep);
			cdns3_start_all_request(priv_dev, priv_ep);
		}
	}

	cdns3_allow_enable_l1(priv_dev, 1);
}

/**
 * cdns3_trb_handled - check whether trb has been handled by DMA
 *
 * @priv_ep: extended endpoint object.
 * @priv_req: request object for checking
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = priv_ep->dequeue - dequeue position
 * EQ = priv_ep->enqueue - enqueue position
 * ST = priv_req->start_trb - index of first TRB in transfer ring
 * ET = priv_req->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of the TRB being processed by DMA.
 *
 * As a first step, we check whether the TRB lies between ST and ET.
 * Then we check whether the cycle bit at index priv_ep->dequeue
 * is correct.
 *
 * Some rules:
 * 1. priv_ep->dequeue never equals current_index.
 * 2. priv_ep->enqueue never exceeds priv_ep->dequeue.
 * 3. Exception: priv_ep->enqueue == priv_ep->dequeue
 *    and priv_ep->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * In the two cases below, the request has been handled.
 * Case 1 - priv_ep->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - priv_ep->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end of
 * the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
			      struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	/* current trb doesn't belong to this request */
	if (priv_req->start_trb < priv_req->end_trb) {
		if (priv_ep->dequeue > priv_req->end_trb)
			goto finish;

		if (priv_ep->dequeue < priv_req->start_trb)
			goto finish;
	}

	if ((priv_req->start_trb > priv_req->end_trb) &&
	    (priv_ep->dequeue > priv_req->end_trb) &&
	    (priv_ep->dequeue < priv_req->start_trb))
		goto finish;

	if ((priv_req->start_trb == priv_req->end_trb) &&
	    (priv_ep->dequeue != priv_req->end_trb))
		goto finish;

	trb = &priv_ep->trb_pool[priv_ep->dequeue];

	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		handled = 1;
	} else if (priv_ep->dequeue > current_index) {
		handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}
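/*
 * Numeric instance of Case 1 above (illustrative, assuming the cycle bit at
 * DQ already matches ccs and TRBS_PER_SEGMENT > 2): with ST=2, ET=4, DQ=3,
 * EQ=7 and CI=5, DQ lies inside [ST, ET] and DQ < CI, so the TRB at DQ is
 * reported as handled.
 */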
static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	struct cdns3_trb *trb;
	bool request_handled = false;
	bool transfer_end = false;

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);

		trb = priv_ep->trb_pool + priv_ep->dequeue;

		/* The TRB was changed to a link TRB, and the request was handled in ep_dequeue */
		while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
			trace_cdns3_complete_trb(priv_ep, trb);
			cdns3_ep_inc_deq(priv_ep);
			trb = priv_ep->trb_pool + priv_ep->dequeue;
		}

		if (!request->stream_id) {
			/* Re-select endpoint. It could be changed by other CPU
			 * during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			while (cdns3_trb_handled(priv_ep, priv_req)) {
				priv_req->finished_trb++;
				if (priv_req->finished_trb >= priv_req->num_of_trb)
					request_handled = true;

				trb = priv_ep->trb_pool + priv_ep->dequeue;
				trace_cdns3_complete_trb(priv_ep, trb);

				if (!transfer_end)
					request->actual +=
						TRB_LEN(le32_to_cpu(trb->length));

				if (priv_req->num_of_trb > 1 &&
				    le32_to_cpu(trb->control) & TRB_SMM &&
				    le32_to_cpu(trb->control) & TRB_CHAIN)
					transfer_end = true;

				cdns3_ep_inc_deq(priv_ep);
			}

			if (request_handled) {
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
				request_handled = false;
				transfer_end = false;
			} else {
				goto prepare_next_td;
			}

			if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
			    TRBS_PER_SEGMENT == 2)
				break;
		} else {
			/* Re-select endpoint. It could be changed by other CPU
			 * during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			trb = priv_ep->trb_pool;
			trace_cdns3_complete_trb(priv_ep, trb);

			if (trb != priv_req->trb)
				dev_warn(priv_dev->dev,
					 "request_trb=0x%p, queue_trb=0x%p\n",
					 priv_req->trb, trb);

			request->actual += TRB_LEN(le32_to_cpu(trb->length));

			if (!request->num_sgs ||
			    (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
				priv_ep->stream_sg_idx = 0;
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
			} else {
				priv_ep->stream_sg_idx++;
				cdns3_ep_run_stream_transfer(priv_ep, request);
			}
			break;
		}
	}
	priv_ep->flags &= ~EP_PENDING_REQUEST;

prepare_next_td:
	if (!(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);
}
void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	cdns3_wa1_restore_cycle_bit(priv_ep);

	if (rearm) {
		trace_cdns3_ring(priv_ep);

		/* Cycle Bit must be updated before arming DMA. */
		wmb();
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);

		__cdns3_gadget_wakeup(priv_dev);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}
}

static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
{
	u16 tdl = priv_ep->pending_tdl;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (tdl > EP_CMD_TDL_MAX) {
		tdl = EP_CMD_TDL_MAX;
		priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
	} else {
		priv_ep->pending_tdl = 0;
	}

	writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
}
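/*
 * Worked example for cdns3_reprogram_tdl() above: if
 * priv_ep->pending_tdl == EP_CMD_TDL_MAX + 5 when the IOT interrupt
 * arrives, the function programs EP_CMD_TDL_MAX now and leaves
 * pending_tdl == 5 for the next pass.
 */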
/**
 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
 * @priv_ep: endpoint object
 *
 * Returns 0
 */
static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 ep_sts_reg;
	struct usb_request *deferred_request;
	struct usb_request *pending_request;
	u32 tdl = 0;

	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

	trace_cdns3_epx_irq(priv_dev, priv_ep);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
	writel(ep_sts_reg, &priv_dev->regs->ep_sts);

	if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
		bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);

		tdl = cdns3_get_tdl(priv_dev);

		/*
		 * Continue the previous transfer:
		 * There is a race between ERDY and PRIME. The device sends
		 * ERDY and, at almost the same time, the host sends PRIME.
		 * This causes the host to ignore the ERDY packet, so the
		 * driver has to send it again.
		 */
		if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
		    EP_STS_HOSTPP(ep_sts_reg))) {
			writel(EP_CMD_ERDY |
			       EP_CMD_ERDY_SID(priv_ep->last_stream_id),
			       &priv_dev->regs->ep_cmd);
			ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
		} else {
			priv_ep->prime_flag = true;

			pending_request = cdns3_next_request(&priv_ep->pending_req_list);
			deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);

			if (deferred_request && !pending_request) {
				cdns3_start_all_request(priv_dev, priv_ep);
			}
		}
	}

	if (ep_sts_reg & EP_STS_TRBERR) {
		if (priv_ep->flags & EP_STALL_PENDING &&
		    !(ep_sts_reg & EP_STS_DESCMIS &&
		    priv_dev->dev_ver < DEV_VER_V2)) {
			cdns3_ep_stall_flush(priv_ep);
		}

		/*
		 * For isochronous transfers the driver completes the request
		 * on IOC or on TRBERR. IOC appears only when the device
		 * receives an OUT data packet. If the host disables the
		 * stream or loses some packets, then the only way to finish
		 * all queued transfers is to do it on the TRBERR event.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
		    !priv_ep->wa1_set) {
			if (!priv_ep->dir) {
				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);

				ep_cfg &= ~EP_CFG_ENABLE;
				writel(ep_cfg, &priv_dev->regs->ep_cfg);
				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
				priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
			}
			cdns3_transfer_completed(priv_dev, priv_ep);
		} else if (!(priv_ep->flags & EP_STALLED) &&
			   !(priv_ep->flags & EP_STALL_PENDING)) {
			if (priv_ep->flags & EP_DEFERRED_DRDY) {
				priv_ep->flags &= ~EP_DEFERRED_DRDY;
				cdns3_start_all_request(priv_dev, priv_ep);
			} else {
				cdns3_rearm_transfer(priv_ep,
						     priv_ep->wa1_set);
			}
		}
	}

	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
	    (ep_sts_reg & EP_STS_IOT)) {
		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
			if (ep_sts_reg & EP_STS_ISP)
				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
			else
				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
		}

		if (!priv_ep->use_streams) {
			if ((ep_sts_reg & EP_STS_IOC) ||
			    (ep_sts_reg & EP_STS_ISP)) {
				cdns3_transfer_completed(priv_dev, priv_ep);
			} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
				   priv_ep->pending_tdl) {
				/* handle IOT with pending tdl */
				cdns3_reprogram_tdl(priv_ep);
			}
		} else if (priv_ep->dir == USB_DIR_OUT) {
			priv_ep->ep_sts_pending |= ep_sts_reg;
		} else if (ep_sts_reg & EP_STS_IOT) {
			cdns3_transfer_completed(priv_dev, priv_ep);
		}
	}

	/*
	 * The MD_EXIT interrupt is set when a stream capable endpoint exits
	 * the MOVE DATA state of the Bulk IN/OUT stream protocol state
	 * machine.
	 */
	if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
	    (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
		priv_ep->ep_sts_pending = 0;
		cdns3_transfer_completed(priv_dev, priv_ep);
	}

	/*
	 * WA2: this condition should only be met when
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
	 * In other cases this interrupt will be disabled.
	 */
	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
	    !(priv_ep->flags & EP_STALLED))
		cdns3_wa2_descmissing_packet(priv_ep);

	return 0;
}
static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
	if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
		priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
}

/**
 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
 * @priv_dev: extended gadget object
 * @usb_ists: bitmap representation of device's reported interrupts
 * (usb_ists register value)
 */
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
					      u32 usb_ists)
__must_hold(&priv_dev->lock)
{
	int speed = 0;

	trace_cdns3_usb_irq(priv_dev, usb_ists);
	if (usb_ists & USB_ISTS_L1ENTI) {
		/*
		 * WORKAROUND: The CDNS3 controller has an issue with hardware
		 * resuming from L1. To fix it, if any DMA transfer is pending,
		 * the driver must start driving the resume signal immediately.
		 */
		if (readl(&priv_dev->regs->drbl))
			__cdns3_gadget_wakeup(priv_dev);
	}

	/* Connection detected */
	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
		speed = cdns3_get_speed(priv_dev);
		priv_dev->gadget.speed = speed;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
		cdns3_ep0_config(priv_dev);
	}

	/* Disconnection detected */
	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
		spin_unlock(&priv_dev->lock);
		cdns3_disconnect_gadget(priv_dev);
		spin_lock(&priv_dev->lock);
		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
		cdns3_hw_reset_eps_config(priv_dev);
	}

	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->suspend) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->resume) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->resume(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* reset */
	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
		if (priv_dev->gadget_driver) {
			spin_unlock(&priv_dev->lock);
			usb_gadget_udc_reset(&priv_dev->gadget,
					     priv_dev->gadget_driver);
			spin_lock(&priv_dev->lock);

			/* read again to check the actual speed */
			speed = cdns3_get_speed(priv_dev);
			priv_dev->gadget.speed = speed;
			cdns3_hw_reset_eps_config(priv_dev);
			cdns3_ep0_config(priv_dev);
		}
	}
}
/**
 * cdns3_device_irq_handler - interrupt handler for device part of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev = data;
	struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
	irqreturn_t ret = IRQ_NONE;
	u32 reg;

	if (cdns->in_lpm)
		return ret;

	/* check USB device interrupt */
	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/* After masking interrupts, new interrupts won't be
		 * reported in usb_ists/ep_ists. In order not to lose
		 * some of them, the driver disables only the detected
		 * interrupts. They will be re-enabled ASAP after the
		 * interrupt source is cleared. This unusual behavior
		 * applies only to the usb_ists register.
		 */
		reg = ~reg & readl(&priv_dev->regs->usb_ien);
		/* mask deferred interrupts. */
		writel(reg, &priv_dev->regs->usb_ien);
		ret = IRQ_WAKE_THREAD;
	}

	/* check endpoint interrupt */
	reg = readl(&priv_dev->regs->ep_ists);
	if (reg) {
		writel(0, &priv_dev->regs->ep_ien);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

/**
 * cdns3_device_thread_irq_handler - interrupt handler for device part
 * of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev = data;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	unsigned int bit;
	unsigned long reg;

	spin_lock_irqsave(&priv_dev->lock, flags);

	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		writel(reg, &priv_dev->regs->usb_ists);
		writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
		cdns3_check_usb_interrupt_proceed(priv_dev, reg);
		ret = IRQ_HANDLED;
	}

	reg = readl(&priv_dev->regs->ep_ists);

	/* handle default endpoint OUT */
	if (reg & EP_ISTS_EP_OUT0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
		ret = IRQ_HANDLED;
	}

	/* handle default endpoint IN */
	if (reg & EP_ISTS_EP_IN0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
		ret = IRQ_HANDLED;
	}

	/* check if interrupt from non default endpoint, if no exit */
	reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
	if (!reg)
		goto irqend;

	for_each_set_bit(bit, &reg,
			 sizeof(u32) * BITS_PER_BYTE) {
		cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
		ret = IRQ_HANDLED;
	}

	if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
		cdns3_wa2_check_outq_status(priv_dev);

irqend:
	writel(~0, &priv_dev->regs->ep_ien);
	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}
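/*
 * IRQ handling pattern above (noted for clarity): the hard handler only
 * masks the interrupt sources it saw and returns IRQ_WAKE_THREAD; the
 * threaded handler then processes them under the device lock and re-enables
 * the masks (USB_IEN_INIT for usb_ien, and ~0 written to ep_ien at irqend).
 */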
/**
 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
 *
 * The real reservation will occur during write to EP_CFG register,
 * this function is used to check if the 'size' reservation is allowed.
 *
 * @priv_dev: extended gadget object
 * @size: the size (KB) for EP would like to allocate
 * @is_in: endpoint direction
 *
 * Return 0 if the required size can be met or a negative value on failure
 */
static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
					  int size, int is_in)
{
	int remained;

	/* 2KB are reserved for EP0 */
	remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;

	if (is_in) {
		if (remained < size)
			return -EPERM;

		priv_dev->onchip_used_size += size;
	} else {
		int required;

		/*
		 * All OUT EPs share the same chunk of on-chip memory, so
		 * the driver checks if it has already assigned enough
		 * buffers.
		 */
		if (priv_dev->out_mem_is_allocated >= size)
			return 0;

		required = size - priv_dev->out_mem_is_allocated;

		if (required > remained)
			return -EPERM;

		priv_dev->out_mem_is_allocated += required;
		priv_dev->onchip_used_size += required;
	}

	return 0;
}
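/*
 * Worked example for the reservation above, assuming an illustrative
 * onchip_buffers == 16 (KB): remained starts at 16 - 0 - 2 = 14. An IN EP
 * asking for 4 raises onchip_used_size to 4; a first OUT EP asking for 4
 * raises out_mem_is_allocated to 4 as well; a second OUT EP asking for 3
 * needs nothing new, since 3 <= 4 and all OUT EPs share the same chunk.
 */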
static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
				  struct cdns3_endpoint *priv_ep)
{
	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;

	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
	if (priv_dev->dev_ver <= DEV_VER_V2)
		writel(USB_CONF_DMULT, &regs->usb_conf);

	if (priv_dev->dev_ver == DEV_VER_V2)
		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);

	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
		u32 mask;

		if (priv_ep->dir)
			mask = BIT(priv_ep->num + 16);
		else
			mask = BIT(priv_ep->num);

		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
			cdns3_set_register_bit(&regs->tdl_beh, mask);
			cdns3_set_register_bit(&regs->tdl_beh2, mask);
			cdns3_set_register_bit(&regs->dma_adv_td, mask);
		}

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);

		cdns3_set_register_bit(&regs->dtrans, mask);
	}
}
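/*
 * Bit layout used above (noted for clarity): for dev_ver >= DEV_VER_V3 each
 * endpoint owns a private bit in the tdl_* and dtrans registers, BIT(num)
 * for OUT endpoints and BIT(num + 16) for IN endpoints.
 */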
1946 /**
1947 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
1948 *
1949 * The real reservation will occur during write to EP_CFG register,
1950 * this function is used to check if the 'size' reservation is allowed.
1951 *
1952 * @priv_dev: extended gadget object
1953 * @size: the size (KB) the EP would like to allocate
1954 * @is_in: endpoint direction
1955 *
1956 * Return 0 if the required size can be met, or a negative value on failure
1957 */
1958 static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
1959 int size, int is_in)
1960 {
1961 int remained;
1962
1963 /* 2KB are reserved for EP0 */
1964 remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
1965
1966 if (is_in) {
1967 if (remained < size)
1968 return -EPERM;
1969
1970 priv_dev->onchip_used_size += size;
1971 } else {
1972 int required;
1973
1974 /*
1975 * All OUT EPs share the same chunk of on-chip memory, so
1976 * the driver checks if it has already assigned enough buffers.
1977 */
1978 if (priv_dev->out_mem_is_allocated >= size)
1979 return 0;
1980
1981 required = size - priv_dev->out_mem_is_allocated;
1982
1983 if (required > remained)
1984 return -EPERM;
1985
1986 priv_dev->out_mem_is_allocated += required;
1987 priv_dev->onchip_used_size += required;
1988 }
1989
1990 return 0;
1991 }
1992
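/*
 * Worked example (numbers assumed for illustration): with onchip_buffers
 * = 16 (KB) and nothing reserved yet, remained = 16 - 0 - 2 = 14. An IN
 * endpoint asking for 4 takes it exclusively (onchip_used_size: 0 -> 4).
 * OUT endpoints share one chunk: if the first asks for 4 and a second
 * asks for 6, only the missing 2 is added the second time
 * (out_mem_is_allocated: 0 -> 4 -> 6, onchip_used_size: 4 -> 8 -> 10).
 */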
1993 static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
1994 struct cdns3_endpoint *priv_ep)
1995 {
1996 struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
1997
1998 /* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
1999 if (priv_dev->dev_ver <= DEV_VER_V2)
2000 writel(USB_CONF_DMULT, &regs->usb_conf);
2001
2002 if (priv_dev->dev_ver == DEV_VER_V2)
2003 writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);
2004
2005 if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
2006 u32 mask;
2007
2008 if (priv_ep->dir)
2009 mask = BIT(priv_ep->num + 16);
2010 else
2011 mask = BIT(priv_ep->num);
2012
2013 if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
2014 cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2015 cdns3_set_register_bit(&regs->tdl_beh, mask);
2016 cdns3_set_register_bit(&regs->tdl_beh2, mask);
2017 cdns3_set_register_bit(&regs->dma_adv_td, mask);
2018 }
2019
2020 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
2021 cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2022
2023 cdns3_set_register_bit(&regs->dtrans, mask);
2024 }
2025 }
2026
2027 /**
2028 * cdns3_ep_config - Configure hardware endpoint
2029 * @priv_ep: extended endpoint object
2030 * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
2031 */
2032 int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
2033 {
2034 bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
2035 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2036 u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
2037 u32 max_packet_size = 0;
2038 u8 maxburst = 0;
2039 u32 ep_cfg = 0;
2040 u8 buffering;
2041 u8 mult = 0;
2042 int ret;
2043
2044 buffering = priv_dev->ep_buf_size - 1;
2045
2046 cdns3_configure_dmult(priv_dev, priv_ep);
2047
2048 switch (priv_ep->type) {
2049 case USB_ENDPOINT_XFER_INT:
2050 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
2051
2052 if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
2053 ep_cfg |= EP_CFG_TDL_CHK;
2054 break;
2055 case USB_ENDPOINT_XFER_BULK:
2056 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
2057
2058 if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
2059 ep_cfg |= EP_CFG_TDL_CHK;
2060 break;
2061 default:
2062 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
2063 mult = priv_dev->ep_iso_burst - 1;
2064 buffering = mult + 1;
2065 }
2066
2067 switch (priv_dev->gadget.speed) {
2068 case USB_SPEED_FULL:
2069 max_packet_size = is_iso_ep ? 1023 : 64;
2070 break;
2071 case USB_SPEED_HIGH:
2072 max_packet_size = is_iso_ep ? 1024 : 512;
2073 break;
2074 case USB_SPEED_SUPER:
2075 /* This is a limitation that the driver itself assumes. */
2076 mult = 0;
2077 max_packet_size = 1024;
2078 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
2079 maxburst = priv_dev->ep_iso_burst - 1;
2080 buffering = (mult + 1) *
2081 (maxburst + 1);
2082
2083 if (priv_ep->interval > 1)
2084 buffering++;
2085 } else {
2086 maxburst = priv_dev->ep_buf_size - 1;
2087 }
2088 break;
2089 default:
2090 /* all other speeds are not supported */
2091 return -EINVAL;
2092 }
2093
2094 if (max_packet_size == 1024)
2095 priv_ep->trb_burst_size = 128;
2096 else if (max_packet_size >= 512)
2097 priv_ep->trb_burst_size = 64;
2098 else
2099 priv_ep->trb_burst_size = 16;
2100
2101 /*
2102 * In versions preceding DEV_VER_V2, for example, iMX8QM, there exist bugs
2103 * in the DMA. These bugs occur when the trb_burst_size exceeds 16 and the
2104 * address is not aligned to 128 Bytes (which is a product of the 64-bit AXI
2105 * and AXI maximum burst length of 16 or 0xF+1, dma_axi_ctrl0[3:0]). This
2106 * results in data corruption when it crosses the 4K border. The corruption
2107 * specifically occurs from the position (4K - (address & 0x7F)) to 4K.
2108 *
2109 * So force trb_burst_size to 16 on such platforms.
2110 */
2111 if (priv_dev->dev_ver < DEV_VER_V2)
2112 priv_ep->trb_burst_size = 16;
2113
2114 mult = min_t(u8, mult, EP_CFG_MULT_MAX);
2115 buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
2116 maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
2117
2118 /* onchip buffer is only allocated before configuration */
2119 if (!priv_dev->hw_configured_flag) {
2120 ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
2121 !!priv_ep->dir);
2122 if (ret) {
2123 dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
2124 return ret;
2125 }
2126 }
2127
2128 if (enable)
2129 ep_cfg |= EP_CFG_ENABLE;
2130
2131 if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2132 if (priv_dev->dev_ver >= DEV_VER_V3) {
2133 u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
2134
2135 /*
2136 * Stream capable endpoints are handled by using ep_tdl
2137 * register. Other endpoints use TDL from TRB feature.
2138 */
2139 cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
2140 mask);
2141 }
2142
2143 /* Enable Stream Bit TDL chk and SID chk */
2144 ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
2145 }
2146
2147 ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
2148 EP_CFG_MULT(mult) |
2149 EP_CFG_BUFFERING(buffering) |
2150 EP_CFG_MAXBURST(maxburst);
2151
2152 cdns3_select_ep(priv_dev, bEndpointAddress);
2153 writel(ep_cfg, &priv_dev->regs->ep_cfg);
2154 priv_ep->flags |= EP_CONFIGURED;
2155
2156 dev_dbg(priv_dev->dev, "Configure %s with val %08x\n",
2157 priv_ep->name, ep_cfg);
2158
2159 return 0;
2160 }
2161
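/*
 * Worked example (values assumed): a SuperSpeed bulk IN endpoint with
 * ep_buf_size = 4 ends up with mult = 0, maxburst = 3, buffering = 3 and
 * max_packet_size = 1024, hence trb_burst_size = 128, and the value
 * written to ep_cfg is:
 *
 *	EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK) | EP_CFG_MAXPKTSIZE(1024) |
 *	EP_CFG_MULT(0) | EP_CFG_BUFFERING(3) | EP_CFG_MAXBURST(3) |
 *	EP_CFG_ENABLE (when enable == true)
 *
 * while cdns3_ep_onchip_buffer_reserve() is asked for buffering + 1 = 4 KB
 * of on-chip memory (exclusive, since this is an IN endpoint).
 */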
2162 /* Find correct direction for HW endpoint according to description */
2163 static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
2164 struct cdns3_endpoint *priv_ep)
2165 {
2166 return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
2167 (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
2168 }
2169
2170 static struct
2171 cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
2172 struct usb_endpoint_descriptor *desc)
2173 {
2174 struct usb_ep *ep;
2175 struct cdns3_endpoint *priv_ep;
2176
2177 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2178 unsigned long num;
2179 int ret;
2180 /* ep names follow the pattern epXin or epXout */
2181 char c[2] = {ep->name[2], '\0'};
2182
2183 ret = kstrtoul(c, 10, &num);
2184 if (ret)
2185 return ERR_PTR(ret);
2186
2187 priv_ep = ep_to_cdns3_ep(ep);
2188 if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
2189 if (!(priv_ep->flags & EP_CLAIMED)) {
2190 priv_ep->num = num;
2191 return priv_ep;
2192 }
2193 }
2194 }
2195
2196 return ERR_PTR(-ENOENT);
2197 }
2198
2199 /*
2200 * Cadence IP has one limitation: all endpoints must be configured
2201 * (Type & MaxPacketSize) before setting configuration through the
2202 * hardware register, which means we can't change an endpoint's
2203 * configuration after set_configuration.
2204 *
2205 * This function sets the EP_CLAIMED flag, which is added when the gadget
2206 * driver uses usb_ep_autoconfig to configure a specific endpoint.
2207 * When the UDC driver receives a set_configuration request,
2208 * it goes through all claimed endpoints and configures them
2209 * accordingly.
2210 *
2211 * In usb_ep_ops.enable/disable, we only enable and disable the endpoint
2212 * through the ep_cfg register, which can be changed after
2213 * set_configuration, and do some software operations accordingly.
2214 */
2215 static struct
2216 usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
2217 struct usb_endpoint_descriptor *desc,
2218 struct usb_ss_ep_comp_descriptor *comp_desc)
2219 {
2220 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2221 struct cdns3_endpoint *priv_ep;
2222 unsigned long flags;
2223
2224 priv_ep = cdns3_find_available_ep(priv_dev, desc);
2225 if (IS_ERR(priv_ep)) {
2226 dev_err(priv_dev->dev, "no available ep\n");
2227 return NULL;
2228 }
2229
2230 dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
2231
2232 spin_lock_irqsave(&priv_dev->lock, flags);
2233 priv_ep->endpoint.desc = desc;
2234 priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
2235 priv_ep->type = usb_endpoint_type(desc);
2236 priv_ep->flags |= EP_CLAIMED;
2237 priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
2238
2239 spin_unlock_irqrestore(&priv_dev->lock, flags);
2240 return &priv_ep->endpoint;
2241 }
2242
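/*
 * Hedged usage sketch: a function driver claims one of these endpoints
 * with usb_ep_autoconfig(), which lands in cdns3_gadget_match_ep() above
 * and sets EP_CLAIMED. The descriptor and "demo_" names below are
 * hypothetical and not part of this driver.
 */
static struct usb_endpoint_descriptor demo_bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static int __maybe_unused demo_claim_ep(struct usb_gadget *gadget)
{
	/* picks a free "epXin" and fills in the real bEndpointAddress */
	struct usb_ep *ep = usb_ep_autoconfig(gadget, &demo_bulk_in_desc);

	if (!ep)
		return -ENODEV;
	return 0;
}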
2243 /**
2244 * cdns3_gadget_ep_alloc_request - Allocates request
2245 * @ep: endpoint object associated with request
2246 * @gfp_flags: gfp flags
2247 *
2248 * Returns allocated request address, NULL on allocation error
2249 */
2250 struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
2251 gfp_t gfp_flags)
2252 {
2253 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2254 struct cdns3_request *priv_req;
2255
2256 priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
2257 if (!priv_req)
2258 return NULL;
2259
2260 priv_req->priv_ep = priv_ep;
2261
2262 trace_cdns3_alloc_request(priv_req);
2263 return &priv_req->request;
2264 }
2265
2266 /**
2267 * cdns3_gadget_ep_free_request - Free memory occupied by request
2268 * @ep: endpoint object associated with request
2269 * @request: request to free
2270 */
2271 void cdns3_gadget_ep_free_request(struct usb_ep *ep,
2272 struct usb_request *request)
2273 {
2274 struct cdns3_request *priv_req = to_cdns3_request(request);
2275
2276 if (priv_req->aligned_buf)
2277 priv_req->aligned_buf->in_use = 0;
2278
2279 trace_cdns3_free_request(priv_req);
2280 kfree(priv_req);
2281 }
2282
2283 /**
2284 * cdns3_gadget_ep_enable - Enable endpoint
2285 * @ep: endpoint object
2286 * @desc: endpoint descriptor
2287 *
2288 * Returns 0 on success, error code elsewhere
2289 */
2290 static int cdns3_gadget_ep_enable(struct usb_ep *ep,
2291 const struct usb_endpoint_descriptor *desc)
2292 {
2293 struct cdns3_endpoint *priv_ep;
2294 struct cdns3_device *priv_dev;
2295 const struct usb_ss_ep_comp_descriptor *comp_desc;
2296 u32 reg = EP_STS_EN_TRBERREN;
2297 u32 bEndpointAddress;
2298 unsigned long flags;
2299 int enable = 1;
2300 int ret = 0;
2301 int val;
2302
2303 if (!ep) {
2304 pr_debug("usbss: ep not configured?\n");
2305 return -EINVAL;
2306 }
2307
2308 priv_ep = ep_to_cdns3_ep(ep);
2309 priv_dev = priv_ep->cdns3_dev;
2310 comp_desc = priv_ep->endpoint.comp_desc;
2311
2312 if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
2313 dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
2314 return -EINVAL;
2315 }
2316
2317 if (!desc->wMaxPacketSize) {
2318 dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
2319 return -EINVAL;
2320 }
2321
2322 if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
2323 "%s is already enabled\n", priv_ep->name))
2324 return 0;
2325
2326 spin_lock_irqsave(&priv_dev->lock, flags);
2327
2328 priv_ep->endpoint.desc = desc;
2329 priv_ep->type = usb_endpoint_type(desc);
2330 priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
2331
2332 if (priv_ep->interval > ISO_MAX_INTERVAL &&
2333 priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
2334 dev_err(priv_dev->dev, "Driver is limited to a maximum period of %d\n",
2335 ISO_MAX_INTERVAL);
2336
2337 ret = -EINVAL;
2338 goto exit;
2339 }
2340
2341 bEndpointAddress = priv_ep->num | priv_ep->dir;
2342 cdns3_select_ep(priv_dev, bEndpointAddress);
2343
2344 /*
2345 * For some versions of the controller, at some point during ISO OUT
2346 * traffic the DMA may read the transfer ring of an EP that has never
2347 * received a doorbell. This issue was detected only in simulation, but
2348 * the driver adds protection against it anyway: an ISO OUT endpoint is
2349 * enabled only just before setting DRBL. This special treatment of
2350 * ISO OUT endpoints is recommended by the controller specification.
2351 */
2352 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
2353 enable = 0;
2354
2355 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
2356 /*
2357 * Enable stream support (SS mode) related interrupts
2358 * in EP_STS_EN Register
2359 */
2360 if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2361 reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
2362 EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
2363 EP_STS_EN_STREAMREN;
2364 priv_ep->use_streams = true;
2365 ret = cdns3_ep_config(priv_ep, enable);
2366 priv_dev->using_streams |= true;
2367 }
2368 } else {
2369 ret = cdns3_ep_config(priv_ep, enable);
2370 }
2371
2372 if (ret)
2373 goto exit;
2374
2375 ret = cdns3_allocate_trb_pool(priv_ep);
2376 if (ret)
2377 goto exit;
2378
2379 bEndpointAddress = priv_ep->num | priv_ep->dir;
2380 cdns3_select_ep(priv_dev, bEndpointAddress);
2381
2382 trace_cdns3_gadget_ep_enable(priv_ep);
2383
2384 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2385
2386 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2387 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2388 1, 1000);
2389
2390 if (unlikely(ret)) {
2391 cdns3_free_trb_pool(priv_ep);
2392 ret = -EINVAL;
2393 goto exit;
2394 }
2395
2396 /* enable interrupt for selected endpoint */
2397 cdns3_set_register_bit(&priv_dev->regs->ep_ien,
2398 BIT(cdns3_ep_addr_to_index(bEndpointAddress)));
2399
2400 if (priv_dev->dev_ver < DEV_VER_V2)
2401 cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
2402
2403 writel(reg, &priv_dev->regs->ep_sts_en);
2404
2405 ep->desc = desc;
2406 priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
2407 EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
2408 priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
2409 priv_ep->wa1_set = 0;
2410 priv_ep->enqueue = 0;
2411 priv_ep->dequeue = 0;
2412 reg = readl(&priv_dev->regs->ep_sts);
2413 priv_ep->pcs = !!EP_STS_CCS(reg);
2414 priv_ep->ccs = !!EP_STS_CCS(reg);
2415 /* one TRB is reserved for the link TRB used in DMULT mode */
2416 priv_ep->free_trbs = priv_ep->num_trbs - 1;
2417 exit:
2418 spin_unlock_irqrestore(&priv_dev->lock, flags);
2419
2420 return ret;
2421 }
2422
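/*
 * Hedged usage sketch: once matched, an endpoint is enabled and fed
 * requests through the standard gadget API, which lands in the ep_ops
 * implemented here (cdns3_gadget_ep_enable/queue). The "demo_" names
 * are hypothetical.
 */
static void demo_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* invoked from cdns3_gadget_giveback() once the transfer finishes */
}

static int __maybe_unused demo_start_io(struct usb_ep *ep,
					const struct usb_endpoint_descriptor *desc,
					void *buf, unsigned int len)
{
	struct usb_request *req;
	int ret;

	ep->desc = desc;	/* normally set up via config_ep_by_speed() */
	ret = usb_ep_enable(ep);
	if (ret)
		return ret;

	req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = demo_complete;

	/*
	 * Ends up in __cdns3_gadget_ep_queue(); the request may sit on
	 * deferred_req_list until SET_CONFIGURATION has been handled.
	 */
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}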
2423 /**
2424 * cdns3_gadget_ep_disable - Disable endpoint
2425 * @ep: endpoint object
2426 *
2427 * Returns 0 on success, error code elsewhere
2428 */
2429 static int cdns3_gadget_ep_disable(struct usb_ep *ep)
2430 {
2431 struct cdns3_endpoint *priv_ep;
2432 struct cdns3_request *priv_req;
2433 struct cdns3_device *priv_dev;
2434 struct usb_request *request;
2435 unsigned long flags;
2436 int ret = 0;
2437 u32 ep_cfg;
2438 int val;
2439
2440 if (!ep) {
2441 pr_err("usbss: invalid parameters\n");
2442 return -EINVAL;
2443 }
2444
2445 priv_ep = ep_to_cdns3_ep(ep);
2446 priv_dev = priv_ep->cdns3_dev;
2447
2448 if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
2449 "%s is already disabled\n", priv_ep->name))
2450 return 0;
2451
2452 spin_lock_irqsave(&priv_dev->lock, flags);
2453
2454 trace_cdns3_gadget_ep_disable(priv_ep);
2455
2456 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2457
2458 ep_cfg = readl(&priv_dev->regs->ep_cfg);
2459 ep_cfg &= ~EP_CFG_ENABLE;
2460 writel(ep_cfg, &priv_dev->regs->ep_cfg);
2461
2462 /*
2463 * Driver needs some time before resetting endpoint.
2464 * It needs to wait for the DBUSY bit to clear or for a timeout to
2465 * expire. 10us is enough time for the controller to stop the transfer.
2466 */
2467 readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
2468 !(val & EP_STS_DBUSY), 1, 10);
2469 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2470
2471 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2472 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2473 1, 1000);
2474 if (unlikely(ret))
2475 dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
2476 priv_ep->name);
2477
2478 while (!list_empty(&priv_ep->pending_req_list)) {
2479 request = cdns3_next_request(&priv_ep->pending_req_list);
2480
2481 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2482 -ESHUTDOWN);
2483 }
2484
2485 while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
2486 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
2487
2488 kfree(priv_req->request.buf);
2489 cdns3_gadget_ep_free_request(&priv_ep->endpoint,
2490 &priv_req->request);
2491 list_del_init(&priv_req->list);
2492 --priv_ep->wa2_counter;
2493 }
2494
2495 while (!list_empty(&priv_ep->deferred_req_list)) {
2496 request = cdns3_next_request(&priv_ep->deferred_req_list);
2497
2498 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2499 -ESHUTDOWN);
2500 }
2501
2502 priv_ep->descmis_req = NULL;
2503
2504 ep->desc = NULL;
2505 priv_ep->flags &= ~EP_ENABLED;
2506 priv_ep->use_streams = false;
2507
2508 spin_unlock_irqrestore(&priv_dev->lock, flags);
2509
2510 return ret;
2511 }
2512
2513 /**
2514 * __cdns3_gadget_ep_queue - Transfer data on endpoint
2515 * @ep: endpoint object
2516 * @request: request object
2517 * @gfp_flags: gfp flags
2518 *
2519 * Returns 0 on success, error code elsewhere
2520 */
2521 static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
2522 struct usb_request *request,
2523 gfp_t gfp_flags)
2524 {
2525 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2526 struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2527 struct cdns3_request *priv_req;
2528 int ret = 0;
2529
2530 request->actual = 0;
2531 request->status = -EINPROGRESS;
2532 priv_req = to_cdns3_request(request);
2533 trace_cdns3_ep_queue(priv_req);
2534
2535 if (priv_dev->dev_ver < DEV_VER_V2) {
2536 ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
2537 priv_req);
2538
2539 if (ret == EINPROGRESS)
2540 return 0;
2541 }
2542
2543 ret = cdns3_prepare_aligned_request_buf(priv_req);
2544 if (ret < 0)
2545 return ret;
2546
2547 if (likely(!(priv_req->flags & REQUEST_UNALIGNED))) {
2548 ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
2549 usb_endpoint_dir_in(ep->desc));
2550 if (ret)
2551 return ret;
2552 }
2553
2554 list_add_tail(&request->list, &priv_ep->deferred_req_list);
2555
2556 /*
2557 * For a stream capable endpoint, start the request only if the prime
2558 * irq flag is set.
2559 * If the hardware endpoint configuration has not been set yet, just
2560 * queue the request on the deferred list; the transfer will be started
2561 * in cdns3_set_hw_configuration.
2562 */ 2563 if (!request->stream_id) { 2564 if (priv_dev->hw_configured_flag && 2565 !(priv_ep->flags & EP_STALLED) && 2566 !(priv_ep->flags & EP_STALL_PENDING)) 2567 cdns3_start_all_request(priv_dev, priv_ep); 2568 } else { 2569 if (priv_dev->hw_configured_flag && priv_ep->prime_flag) 2570 cdns3_start_all_request(priv_dev, priv_ep); 2571 } 2572 2573 return 0; 2574 } 2575 2576 static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 2577 gfp_t gfp_flags) 2578 { 2579 struct usb_request *zlp_request; 2580 struct cdns3_endpoint *priv_ep; 2581 struct cdns3_device *priv_dev; 2582 unsigned long flags; 2583 int ret; 2584 2585 if (!request || !ep) 2586 return -EINVAL; 2587 2588 priv_ep = ep_to_cdns3_ep(ep); 2589 priv_dev = priv_ep->cdns3_dev; 2590 2591 spin_lock_irqsave(&priv_dev->lock, flags); 2592 2593 ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags); 2594 2595 if (ret == 0 && request->zero && request->length && 2596 (request->length % ep->maxpacket == 0)) { 2597 struct cdns3_request *priv_req; 2598 2599 zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC); 2600 zlp_request->buf = priv_dev->zlp_buf; 2601 zlp_request->length = 0; 2602 2603 priv_req = to_cdns3_request(zlp_request); 2604 priv_req->flags |= REQUEST_ZLP; 2605 2606 dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n", 2607 priv_ep->name); 2608 ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags); 2609 } 2610 2611 spin_unlock_irqrestore(&priv_dev->lock, flags); 2612 return ret; 2613 } 2614 2615 /** 2616 * cdns3_gadget_ep_dequeue - Remove request from transfer queue 2617 * @ep: endpoint object associated with request 2618 * @request: request object 2619 * 2620 * Returns 0 on success, error code elsewhere 2621 */ 2622 int cdns3_gadget_ep_dequeue(struct usb_ep *ep, 2623 struct usb_request *request) 2624 { 2625 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2626 struct cdns3_device *priv_dev; 2627 struct usb_request *req, *req_temp; 2628 struct cdns3_request *priv_req; 2629 struct cdns3_trb *link_trb; 2630 u8 req_on_hw_ring = 0; 2631 unsigned long flags; 2632 int ret = 0; 2633 int val; 2634 2635 if (!ep || !request || !ep->desc) 2636 return -EINVAL; 2637 2638 priv_dev = priv_ep->cdns3_dev; 2639 2640 spin_lock_irqsave(&priv_dev->lock, flags); 2641 2642 priv_req = to_cdns3_request(request); 2643 2644 trace_cdns3_ep_dequeue(priv_req); 2645 2646 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2647 2648 list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list, 2649 list) { 2650 if (request == req) { 2651 req_on_hw_ring = 1; 2652 goto found; 2653 } 2654 } 2655 2656 list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list, 2657 list) { 2658 if (request == req) 2659 goto found; 2660 } 2661 2662 goto not_found; 2663 2664 found: 2665 link_trb = priv_req->trb; 2666 2667 /* Update ring only if removed request is on pending_req_list list */ 2668 if (req_on_hw_ring && link_trb) { 2669 /* Stop DMA */ 2670 writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd); 2671 2672 /* wait for DFLUSH cleared */ 2673 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2674 !(val & EP_CMD_DFLUSH), 1, 1000); 2675 2676 link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma + 2677 ((priv_req->end_trb + 1) * TRB_SIZE))); 2678 link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) | 2679 TRB_TYPE(TRB_LINK) | TRB_CHAIN); 2680 2681 if (priv_ep->wa1_trb == priv_req->trb) 2682 cdns3_wa1_restore_cycle_bit(priv_ep); 2683 } 2684 2685 cdns3_gadget_giveback(priv_ep, priv_req, 
-ECONNRESET); 2686 2687 req = cdns3_next_request(&priv_ep->pending_req_list); 2688 if (req) 2689 cdns3_rearm_transfer(priv_ep, 1); 2690 2691 not_found: 2692 spin_unlock_irqrestore(&priv_dev->lock, flags); 2693 return ret; 2694 } 2695 2696 /** 2697 * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint 2698 * Should be called after acquiring spin_lock and selecting ep 2699 * @priv_ep: endpoint object to set stall on. 2700 */ 2701 void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep) 2702 { 2703 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2704 2705 trace_cdns3_halt(priv_ep, 1, 0); 2706 2707 if (!(priv_ep->flags & EP_STALLED)) { 2708 u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts); 2709 2710 if (!(ep_sts_reg & EP_STS_DBUSY)) 2711 cdns3_ep_stall_flush(priv_ep); 2712 else 2713 priv_ep->flags |= EP_STALL_PENDING; 2714 } 2715 } 2716 2717 /** 2718 * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint 2719 * Should be called after acquiring spin_lock and selecting ep 2720 * @priv_ep: endpoint object to clear stall on 2721 */ 2722 int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) 2723 { 2724 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2725 struct usb_request *request; 2726 struct cdns3_request *priv_req; 2727 struct cdns3_trb *trb = NULL; 2728 struct cdns3_trb trb_tmp; 2729 int ret; 2730 int val; 2731 2732 trace_cdns3_halt(priv_ep, 0, 0); 2733 2734 request = cdns3_next_request(&priv_ep->pending_req_list); 2735 if (request) { 2736 priv_req = to_cdns3_request(request); 2737 trb = priv_req->trb; 2738 if (trb) { 2739 trb_tmp = *trb; 2740 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); 2741 } 2742 } 2743 2744 writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2745 2746 /* wait for EPRST cleared */ 2747 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2748 !(val & EP_CMD_EPRST), 1, 100); 2749 if (ret) 2750 return -EINVAL; 2751 2752 priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); 2753 2754 if (request) { 2755 if (trb) 2756 *trb = trb_tmp; 2757 2758 cdns3_rearm_transfer(priv_ep, 1); 2759 } 2760 2761 cdns3_start_all_request(priv_dev, priv_ep); 2762 return ret; 2763 } 2764 2765 /** 2766 * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint 2767 * @ep: endpoint object to set/clear stall on 2768 * @value: 1 for set stall, 0 for clear stall 2769 * 2770 * Returns 0 on success, error code elsewhere 2771 */ 2772 int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value) 2773 { 2774 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2775 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2776 unsigned long flags; 2777 int ret = 0; 2778 2779 if (!(priv_ep->flags & EP_ENABLED)) 2780 return -EPERM; 2781 2782 spin_lock_irqsave(&priv_dev->lock, flags); 2783 2784 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2785 2786 if (!value) { 2787 priv_ep->flags &= ~EP_WEDGE; 2788 ret = __cdns3_gadget_ep_clear_halt(priv_ep); 2789 } else { 2790 __cdns3_gadget_ep_set_halt(priv_ep); 2791 } 2792 2793 spin_unlock_irqrestore(&priv_dev->lock, flags); 2794 2795 return ret; 2796 } 2797 2798 extern const struct usb_ep_ops cdns3_gadget_ep0_ops; 2799 2800 static const struct usb_ep_ops cdns3_gadget_ep_ops = { 2801 .enable = cdns3_gadget_ep_enable, 2802 .disable = cdns3_gadget_ep_disable, 2803 .alloc_request = cdns3_gadget_ep_alloc_request, 2804 .free_request = cdns3_gadget_ep_free_request, 2805 .queue = cdns3_gadget_ep_queue, 2806 .dequeue = cdns3_gadget_ep_dequeue, 2807 .set_halt = cdns3_gadget_ep_set_halt, 2808 .set_wedge 
= cdns3_gadget_ep_set_wedge,
2809 };
2810
2811 /**
2812 * cdns3_gadget_get_frame - Returns the current ITP frame number
2813 * @gadget: gadget object
2814 *
2815 * Returns the current ITP frame number
2816 */
2817 static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
2818 {
2819 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2820
2821 return readl(&priv_dev->regs->usb_itpn);
2822 }
2823
2824 int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
2825 {
2826 enum usb_device_speed speed;
2827
2828 speed = cdns3_get_speed(priv_dev);
2829
2830 if (speed >= USB_SPEED_SUPER)
2831 return 0;
2832
2833 /* Start driving resume signaling to indicate remote wakeup. */
2834 writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
2835
2836 return 0;
2837 }
2838
2839 static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
2840 {
2841 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2842 unsigned long flags;
2843 int ret = 0;
2844
2845 spin_lock_irqsave(&priv_dev->lock, flags);
2846 ret = __cdns3_gadget_wakeup(priv_dev);
2847 spin_unlock_irqrestore(&priv_dev->lock, flags);
2848 return ret;
2849 }
2850
2851 static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
2852 int is_selfpowered)
2853 {
2854 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2855 unsigned long flags;
2856
2857 spin_lock_irqsave(&priv_dev->lock, flags);
2858 priv_dev->is_selfpowered = !!is_selfpowered;
2859 spin_unlock_irqrestore(&priv_dev->lock, flags);
2860 return 0;
2861 }
2862
2863 static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
2864 {
2865 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2866
2867 if (is_on) {
2868 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
2869 } else {
2870 writel(~0, &priv_dev->regs->ep_ists);
2871 writel(~0, &priv_dev->regs->usb_ists);
2872 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2873 }
2874
2875 return 0;
2876 }
2877
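/*
 * Hedged note: the UDC core reaches the pullup callback above through
 * usb_gadget_connect() and usb_gadget_disconnect(). A soft re-enumeration
 * driven from gadget-side code is therefore just (sketch, not taken from
 * this driver):
 *
 *	usb_gadget_disconnect(gadget);	// -> cdns3_gadget_pullup(gadget, 0)
 *	usb_gadget_connect(gadget);	// -> cdns3_gadget_pullup(gadget, 1)
 *
 * Clearing ep_ists/usb_ists on disconnect drops any interrupts that were
 * latched while the device was still attached.
 */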
2878 static void cdns3_gadget_config(struct cdns3_device *priv_dev)
2879 {
2880 struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
2881 u32 reg;
2882
2883 cdns3_ep0_config(priv_dev);
2884
2885 /* enable interrupts for endpoint 0 (in and out) */
2886 writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);
2887
2888 /*
2889 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
2890 * revision of the controller.
2891 */
2892 if (priv_dev->dev_ver == DEV_VER_TI_V1) {
2893 reg = readl(&regs->dbg_link1);
2894
2895 reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
2896 reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
2897 DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
2898 writel(reg, &regs->dbg_link1);
2899 }
2900
2901 /*
2902 * By default, some platforms configure protected access to memory.
2903 * This causes problems with the cache, so the driver restores
2904 * non-secure access to memory.
2905 */
2906 reg = readl(&regs->dma_axi_ctrl);
2907 reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
2908 DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
2909 writel(reg, &regs->dma_axi_ctrl);
2910
2911 /* enable generic interrupts */
2912 writel(USB_IEN_INIT, &regs->usb_ien);
2913 writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
2914 /* keep Fast Access bit */
2915 writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);
2916
2917 cdns3_configure_dmult(priv_dev, NULL);
2918 }
2919
2920 /**
2921 * cdns3_gadget_udc_start - Gadget start
2922 * @gadget: gadget object
2923 * @driver: driver which operates on this gadget
2924 *
2925 * Returns 0 on success, error code elsewhere
2926 */
2927 static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
2928 struct usb_gadget_driver *driver)
2929 {
2930 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2931 unsigned long flags;
2932 enum usb_device_speed max_speed = driver->max_speed;
2933
2934 spin_lock_irqsave(&priv_dev->lock, flags);
2935 priv_dev->gadget_driver = driver;
2936
2937 /* limit speed if necessary */
2938 max_speed = min(driver->max_speed, gadget->max_speed);
2939
2940 switch (max_speed) {
2941 case USB_SPEED_FULL:
2942 writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
2943 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2944 break;
2945 case USB_SPEED_HIGH:
2946 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2947 break;
2948 case USB_SPEED_SUPER:
2949 break;
2950 default:
2951 dev_err(priv_dev->dev,
2952 "invalid maximum_speed parameter %d\n",
2953 max_speed);
2954 fallthrough;
2955 case USB_SPEED_UNKNOWN:
2956 /* default to superspeed */
2957 max_speed = USB_SPEED_SUPER;
2958 break;
2959 }
2960
2961 cdns3_gadget_config(priv_dev);
2962 spin_unlock_irqrestore(&priv_dev->lock, flags);
2963 return 0;
2964 }
2965
2966 /**
2967 * cdns3_gadget_udc_stop - Stops gadget
2968 * @gadget: gadget object
2969 *
2970 * Returns 0
2971 */
2972 static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
2973 {
2974 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2975 struct cdns3_endpoint *priv_ep;
2976 u32 bEndpointAddress;
2977 struct usb_ep *ep;
2978 int val;
2979
2980 priv_dev->gadget_driver = NULL;
2981
2982 priv_dev->onchip_used_size = 0;
2983 priv_dev->out_mem_is_allocated = 0;
2984 priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2985
2986 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2987 priv_ep = ep_to_cdns3_ep(ep);
2988 bEndpointAddress = priv_ep->num | priv_ep->dir;
2989 cdns3_select_ep(priv_dev, bEndpointAddress);
2990 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2991 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2992 !(val & EP_CMD_EPRST), 1, 100);
2993
2994 priv_ep->flags &= ~EP_CLAIMED;
2995 }
2996
2997 /* disable interrupts for device */
2998 writel(0, &priv_dev->regs->usb_ien);
2999 writel(0, &priv_dev->regs->usb_pwr);
3000 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
3001
3002 return 0;
3003 }
3004
3005 /**
3006 * cdns3_gadget_check_config - ensure cdns3 can support the USB configuration
3007 * @gadget: pointer to the USB gadget
3008 *
3009 * Used to record the maximum number of endpoints being used in a USB
3010 * composite device (across all configurations). This is used in the
3011 * calculation of the TXFIFO sizes when resizing internal memory for
3012 * individual endpoints. It helps ensure that the resizing logic reserves
3013 * enough space for at least one max packet.
3014 */ 3015 static int cdns3_gadget_check_config(struct usb_gadget *gadget) 3016 { 3017 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 3018 struct cdns3_endpoint *priv_ep; 3019 struct usb_ep *ep; 3020 int n_in = 0; 3021 int total; 3022 3023 list_for_each_entry(ep, &gadget->ep_list, ep_list) { 3024 priv_ep = ep_to_cdns3_ep(ep); 3025 if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN)) 3026 n_in++; 3027 } 3028 3029 /* 2KB are reserved for EP0, 1KB for out*/ 3030 total = 2 + n_in + 1; 3031 3032 if (total > priv_dev->onchip_buffers) 3033 return -ENOMEM; 3034 3035 priv_dev->ep_buf_size = priv_dev->ep_iso_burst = 3036 (priv_dev->onchip_buffers - 2) / (n_in + 1); 3037 3038 return 0; 3039 } 3040 3041 static const struct usb_gadget_ops cdns3_gadget_ops = { 3042 .get_frame = cdns3_gadget_get_frame, 3043 .wakeup = cdns3_gadget_wakeup, 3044 .set_selfpowered = cdns3_gadget_set_selfpowered, 3045 .pullup = cdns3_gadget_pullup, 3046 .udc_start = cdns3_gadget_udc_start, 3047 .udc_stop = cdns3_gadget_udc_stop, 3048 .match_ep = cdns3_gadget_match_ep, 3049 .check_config = cdns3_gadget_check_config, 3050 }; 3051 3052 static void cdns3_free_all_eps(struct cdns3_device *priv_dev) 3053 { 3054 int i; 3055 3056 /* ep0 OUT point to ep0 IN. */ 3057 priv_dev->eps[16] = NULL; 3058 3059 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) 3060 if (priv_dev->eps[i]) { 3061 cdns3_free_trb_pool(priv_dev->eps[i]); 3062 devm_kfree(priv_dev->dev, priv_dev->eps[i]); 3063 } 3064 } 3065 3066 /** 3067 * cdns3_init_eps - Initializes software endpoints of gadget 3068 * @priv_dev: extended gadget object 3069 * 3070 * Returns 0 on success, error code elsewhere 3071 */ 3072 static int cdns3_init_eps(struct cdns3_device *priv_dev) 3073 { 3074 u32 ep_enabled_reg, iso_ep_reg; 3075 struct cdns3_endpoint *priv_ep; 3076 int ep_dir, ep_number; 3077 u32 ep_mask; 3078 int ret = 0; 3079 int i; 3080 3081 /* Read it from USB_CAP3 to USB_CAP5 */ 3082 ep_enabled_reg = readl(&priv_dev->regs->usb_cap3); 3083 iso_ep_reg = readl(&priv_dev->regs->usb_cap4); 3084 3085 dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n"); 3086 3087 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) { 3088 ep_dir = i >> 4; /* i div 16 */ 3089 ep_number = i & 0xF; /* i % 16 */ 3090 ep_mask = BIT(i); 3091 3092 if (!(ep_enabled_reg & ep_mask)) 3093 continue; 3094 3095 if (ep_dir && !ep_number) { 3096 priv_dev->eps[i] = priv_dev->eps[0]; 3097 continue; 3098 } 3099 3100 priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep), 3101 GFP_KERNEL); 3102 if (!priv_ep) 3103 goto err; 3104 3105 /* set parent of endpoint object */ 3106 priv_ep->cdns3_dev = priv_dev; 3107 priv_dev->eps[i] = priv_ep; 3108 priv_ep->num = ep_number; 3109 priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT; 3110 3111 if (!ep_number) { 3112 ret = cdns3_init_ep0(priv_dev, priv_ep); 3113 if (ret) { 3114 dev_err(priv_dev->dev, "Failed to init ep0\n"); 3115 goto err; 3116 } 3117 } else { 3118 snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s", 3119 ep_number, !!ep_dir ? 
"in" : "out"); 3120 priv_ep->endpoint.name = priv_ep->name; 3121 3122 usb_ep_set_maxpacket_limit(&priv_ep->endpoint, 3123 CDNS3_EP_MAX_PACKET_LIMIT); 3124 priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS; 3125 priv_ep->endpoint.ops = &cdns3_gadget_ep_ops; 3126 if (ep_dir) 3127 priv_ep->endpoint.caps.dir_in = 1; 3128 else 3129 priv_ep->endpoint.caps.dir_out = 1; 3130 3131 if (iso_ep_reg & ep_mask) 3132 priv_ep->endpoint.caps.type_iso = 1; 3133 3134 priv_ep->endpoint.caps.type_bulk = 1; 3135 priv_ep->endpoint.caps.type_int = 1; 3136 3137 list_add_tail(&priv_ep->endpoint.ep_list, 3138 &priv_dev->gadget.ep_list); 3139 } 3140 3141 priv_ep->flags = 0; 3142 3143 dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n", 3144 priv_ep->name, 3145 priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "", 3146 priv_ep->endpoint.caps.type_iso ? "ISO" : ""); 3147 3148 INIT_LIST_HEAD(&priv_ep->pending_req_list); 3149 INIT_LIST_HEAD(&priv_ep->deferred_req_list); 3150 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); 3151 } 3152 3153 return 0; 3154 err: 3155 cdns3_free_all_eps(priv_dev); 3156 return -ENOMEM; 3157 } 3158 3159 static void cdns3_gadget_release(struct device *dev) 3160 { 3161 struct cdns3_device *priv_dev = container_of(dev, 3162 struct cdns3_device, gadget.dev); 3163 3164 kfree(priv_dev); 3165 } 3166 3167 static void cdns3_gadget_exit(struct cdns *cdns) 3168 { 3169 struct cdns3_device *priv_dev; 3170 3171 priv_dev = cdns->gadget_dev; 3172 3173 3174 pm_runtime_mark_last_busy(cdns->dev); 3175 pm_runtime_put_autosuspend(cdns->dev); 3176 3177 usb_del_gadget(&priv_dev->gadget); 3178 devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); 3179 3180 cdns3_free_all_eps(priv_dev); 3181 3182 while (!list_empty(&priv_dev->aligned_buf_list)) { 3183 struct cdns3_aligned_buf *buf; 3184 3185 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); 3186 dma_free_noncoherent(priv_dev->sysdev, buf->size, 3187 buf->buf, 3188 buf->dma, 3189 buf->dir); 3190 3191 list_del(&buf->list); 3192 kfree(buf); 3193 } 3194 3195 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3196 priv_dev->setup_dma); 3197 dma_pool_destroy(priv_dev->eps_dma_pool); 3198 3199 kfree(priv_dev->zlp_buf); 3200 usb_put_gadget(&priv_dev->gadget); 3201 cdns->gadget_dev = NULL; 3202 cdns_drd_gadget_off(cdns); 3203 } 3204 3205 static int cdns3_gadget_start(struct cdns *cdns) 3206 { 3207 struct cdns3_device *priv_dev; 3208 u32 max_speed; 3209 int ret; 3210 3211 priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); 3212 if (!priv_dev) 3213 return -ENOMEM; 3214 3215 usb_initialize_gadget(cdns->dev, &priv_dev->gadget, 3216 cdns3_gadget_release); 3217 cdns->gadget_dev = priv_dev; 3218 priv_dev->sysdev = cdns->dev; 3219 priv_dev->dev = cdns->dev; 3220 priv_dev->regs = cdns->dev_regs; 3221 3222 device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size", 3223 &priv_dev->onchip_buffers); 3224 3225 if (priv_dev->onchip_buffers <= 0) { 3226 u32 reg = readl(&priv_dev->regs->usb_cap2); 3227 3228 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); 3229 } 3230 3231 if (!priv_dev->onchip_buffers) 3232 priv_dev->onchip_buffers = 256; 3233 3234 max_speed = usb_get_maximum_speed(cdns->dev); 3235 3236 /* Check the maximum_speed parameter */ 3237 switch (max_speed) { 3238 case USB_SPEED_FULL: 3239 case USB_SPEED_HIGH: 3240 case USB_SPEED_SUPER: 3241 break; 3242 default: 3243 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", 3244 max_speed); 3245 fallthrough; 3246 case USB_SPEED_UNKNOWN: 3247 /* default to superspeed */ 3248 max_speed = 
USB_SPEED_SUPER; 3249 break; 3250 } 3251 3252 /* fill gadget fields */ 3253 priv_dev->gadget.max_speed = max_speed; 3254 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3255 priv_dev->gadget.ops = &cdns3_gadget_ops; 3256 priv_dev->gadget.name = "usb-ss-gadget"; 3257 priv_dev->gadget.quirk_avoids_skb_reserve = 1; 3258 priv_dev->gadget.irq = cdns->dev_irq; 3259 3260 spin_lock_init(&priv_dev->lock); 3261 INIT_WORK(&priv_dev->pending_status_wq, 3262 cdns3_pending_setup_status_handler); 3263 3264 INIT_WORK(&priv_dev->aligned_buf_wq, 3265 cdns3_free_aligned_request_buf); 3266 3267 /* initialize endpoint container */ 3268 INIT_LIST_HEAD(&priv_dev->gadget.ep_list); 3269 INIT_LIST_HEAD(&priv_dev->aligned_buf_list); 3270 priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool", 3271 priv_dev->sysdev, 3272 TRB_RING_SIZE, 8, 0); 3273 if (!priv_dev->eps_dma_pool) { 3274 dev_err(priv_dev->dev, "Failed to create TRB dma pool\n"); 3275 ret = -ENOMEM; 3276 goto err1; 3277 } 3278 3279 ret = cdns3_init_eps(priv_dev); 3280 if (ret) { 3281 dev_err(priv_dev->dev, "Failed to create endpoints\n"); 3282 goto err1; 3283 } 3284 3285 /* allocate memory for setup packet buffer */ 3286 priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8, 3287 &priv_dev->setup_dma, GFP_DMA); 3288 if (!priv_dev->setup_buf) { 3289 ret = -ENOMEM; 3290 goto err2; 3291 } 3292 3293 priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6); 3294 3295 dev_dbg(priv_dev->dev, "Device Controller version: %08x\n", 3296 readl(&priv_dev->regs->usb_cap6)); 3297 dev_dbg(priv_dev->dev, "USB Capabilities:: %08x\n", 3298 readl(&priv_dev->regs->usb_cap1)); 3299 dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n", 3300 readl(&priv_dev->regs->usb_cap2)); 3301 3302 priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver); 3303 if (priv_dev->dev_ver >= DEV_VER_V2) 3304 priv_dev->gadget.sg_supported = 1; 3305 3306 priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL); 3307 if (!priv_dev->zlp_buf) { 3308 ret = -ENOMEM; 3309 goto err3; 3310 } 3311 3312 /* add USB gadget device */ 3313 ret = usb_add_gadget(&priv_dev->gadget); 3314 if (ret < 0) { 3315 dev_err(priv_dev->dev, "Failed to add gadget\n"); 3316 goto err4; 3317 } 3318 3319 return 0; 3320 err4: 3321 kfree(priv_dev->zlp_buf); 3322 err3: 3323 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3324 priv_dev->setup_dma); 3325 err2: 3326 cdns3_free_all_eps(priv_dev); 3327 err1: 3328 dma_pool_destroy(priv_dev->eps_dma_pool); 3329 3330 usb_put_gadget(&priv_dev->gadget); 3331 cdns->gadget_dev = NULL; 3332 return ret; 3333 } 3334 3335 static int __cdns3_gadget_init(struct cdns *cdns) 3336 { 3337 int ret = 0; 3338 3339 /* Ensure 32-bit DMA Mask in case we switched back from Host mode */ 3340 ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32)); 3341 if (ret) { 3342 dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret); 3343 return ret; 3344 } 3345 3346 cdns_drd_gadget_on(cdns); 3347 pm_runtime_get_sync(cdns->dev); 3348 3349 ret = cdns3_gadget_start(cdns); 3350 if (ret) { 3351 pm_runtime_put_sync(cdns->dev); 3352 return ret; 3353 } 3354 3355 /* 3356 * Because interrupt line can be shared with other components in 3357 * driver it can't use IRQF_ONESHOT flag here. 
3358 */ 3359 ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, 3360 cdns3_device_irq_handler, 3361 cdns3_device_thread_irq_handler, 3362 IRQF_SHARED, dev_name(cdns->dev), 3363 cdns->gadget_dev); 3364 3365 if (ret) 3366 goto err0; 3367 3368 return 0; 3369 err0: 3370 cdns3_gadget_exit(cdns); 3371 return ret; 3372 } 3373 3374 static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup) 3375 __must_hold(&cdns->lock) 3376 { 3377 struct cdns3_device *priv_dev = cdns->gadget_dev; 3378 3379 spin_unlock(&cdns->lock); 3380 cdns3_disconnect_gadget(priv_dev); 3381 spin_lock(&cdns->lock); 3382 3383 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3384 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); 3385 cdns3_hw_reset_eps_config(priv_dev); 3386 3387 /* disable interrupt for device */ 3388 writel(0, &priv_dev->regs->usb_ien); 3389 3390 return 0; 3391 } 3392 3393 static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated) 3394 { 3395 struct cdns3_device *priv_dev = cdns->gadget_dev; 3396 3397 if (!priv_dev->gadget_driver) 3398 return 0; 3399 3400 cdns3_gadget_config(priv_dev); 3401 if (hibernated) 3402 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf); 3403 3404 return 0; 3405 } 3406 3407 /** 3408 * cdns3_gadget_init - initialize device structure 3409 * 3410 * @cdns: cdns instance 3411 * 3412 * This function initializes the gadget. 3413 */ 3414 int cdns3_gadget_init(struct cdns *cdns) 3415 { 3416 struct cdns_role_driver *rdrv; 3417 3418 rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); 3419 if (!rdrv) 3420 return -ENOMEM; 3421 3422 rdrv->start = __cdns3_gadget_init; 3423 rdrv->stop = cdns3_gadget_exit; 3424 rdrv->suspend = cdns3_gadget_suspend; 3425 rdrv->resume = cdns3_gadget_resume; 3426 rdrv->state = CDNS_ROLE_STATE_INACTIVE; 3427 rdrv->name = "gadget"; 3428 cdns->roles[USB_ROLE_DEVICE] = rdrv; 3429 3430 return 0; 3431 } 3432