1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Cadence USBSS DRD Driver - gadget side. 4 * 5 * Copyright (C) 2018-2019 Cadence Design Systems. 6 * Copyright (C) 2017-2018 NXP 7 * 8 * Authors: Pawel Jez <pjez@cadence.com>, 9 * Pawel Laszczak <pawell@cadence.com> 10 * Peter Chen <peter.chen@nxp.com> 11 */ 12 13 /* 14 * Work around 1: 15 * At some situations, the controller may get stale data address in TRB 16 * at below sequences: 17 * 1. Controller read TRB includes data address 18 * 2. Software updates TRBs includes data address and Cycle bit 19 * 3. Controller read TRB which includes Cycle bit 20 * 4. DMA run with stale data address 21 * 22 * To fix this problem, driver needs to make the first TRB in TD as invalid. 23 * After preparing all TRBs driver needs to check the position of DMA and 24 * if the DMA point to the first just added TRB and doorbell is 1, 25 * then driver must defer making this TRB as valid. This TRB will be make 26 * as valid during adding next TRB only if DMA is stopped or at TRBERR 27 * interrupt. 28 * 29 * Issue has been fixed in DEV_VER_V3 version of controller. 30 * 31 * Work around 2: 32 * Controller for OUT endpoints has shared on-chip buffers for all incoming 33 * packets, including ep0out. It's FIFO buffer, so packets must be handle by DMA 34 * in correct order. If the first packet in the buffer will not be handled, 35 * then the following packets directed for other endpoints and functions 36 * will be blocked. 37 * Additionally the packets directed to one endpoint can block entire on-chip 38 * buffers. In this case transfer to other endpoints also will blocked. 39 * 40 * To resolve this issue after raising the descriptor missing interrupt 41 * driver prepares internal usb_request object and use it to arm DMA transfer. 42 * 43 * The problematic situation was observed in case when endpoint has been enabled 44 * but no usb_request were queued. Driver try detects such endpoints and will 45 * use this workaround only for these endpoint. 46 * 47 * Driver use limited number of buffer. This number can be set by macro 48 * CDNS3_WA2_NUM_BUFFERS. 49 * 50 * Such blocking situation was observed on ACM gadget. For this function 51 * host send OUT data packet but ACM function is not prepared for this packet. 52 * It's cause that buffer placed in on chip memory block transfer to other 53 * endpoints. 54 * 55 * Issue has been fixed in DEV_VER_V2 version of controller. 56 * 57 */ 58 59 #include <linux/dma-mapping.h> 60 #include <linux/usb/gadget.h> 61 #include <linux/module.h> 62 #include <linux/dmapool.h> 63 #include <linux/iopoll.h> 64 #include <linux/property.h> 65 66 #include "core.h" 67 #include "gadget-export.h" 68 #include "cdns3-gadget.h" 69 #include "cdns3-trace.h" 70 #include "drd.h" 71 72 static int __cdns3_gadget_ep_queue(struct usb_ep *ep, 73 struct usb_request *request, 74 gfp_t gfp_flags); 75 76 static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, 77 struct usb_request *request); 78 79 static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, 80 struct usb_request *request); 81 82 /** 83 * cdns3_clear_register_bit - clear bit in given register. 84 * @ptr: address of device controller register to be read and changed 85 * @mask: bits requested to clar 86 */ 87 static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask) 88 { 89 mask = readl(ptr) & ~mask; 90 writel(mask, ptr); 91 } 92 93 /** 94 * cdns3_set_register_bit - set bit in given register. 95 * @ptr: address of device controller register to be read and changed 96 * @mask: bits requested to set 97 */ 98 void cdns3_set_register_bit(void __iomem *ptr, u32 mask) 99 { 100 mask = readl(ptr) | mask; 101 writel(mask, ptr); 102 } 103 104 /** 105 * cdns3_ep_addr_to_index - Macro converts endpoint address to 106 * index of endpoint object in cdns3_device.eps[] container 107 * @ep_addr: endpoint address for which endpoint object is required 108 * 109 */ 110 u8 cdns3_ep_addr_to_index(u8 ep_addr) 111 { 112 return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0)); 113 } 114 115 static int cdns3_get_dma_pos(struct cdns3_device *priv_dev, 116 struct cdns3_endpoint *priv_ep) 117 { 118 int dma_index; 119 120 dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma; 121 122 return dma_index / TRB_SIZE; 123 } 124 125 /** 126 * cdns3_next_request - returns next request from list 127 * @list: list containing requests 128 * 129 * Returns request or NULL if no requests in list 130 */ 131 struct usb_request *cdns3_next_request(struct list_head *list) 132 { 133 return list_first_entry_or_null(list, struct usb_request, list); 134 } 135 136 /** 137 * cdns3_next_align_buf - returns next buffer from list 138 * @list: list containing buffers 139 * 140 * Returns buffer or NULL if no buffers in list 141 */ 142 static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list) 143 { 144 return list_first_entry_or_null(list, struct cdns3_aligned_buf, list); 145 } 146 147 /** 148 * cdns3_next_priv_request - returns next request from list 149 * @list: list containing requests 150 * 151 * Returns request or NULL if no requests in list 152 */ 153 static struct cdns3_request *cdns3_next_priv_request(struct list_head *list) 154 { 155 return list_first_entry_or_null(list, struct cdns3_request, list); 156 } 157 158 /** 159 * cdns3_select_ep - selects endpoint 160 * @priv_dev: extended gadget object 161 * @ep: endpoint address 162 */ 163 void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep) 164 { 165 if (priv_dev->selected_ep == ep) 166 return; 167 168 priv_dev->selected_ep = ep; 169 writel(ep, &priv_dev->regs->ep_sel); 170 } 171 172 /** 173 * cdns3_get_tdl - gets current tdl for selected endpoint. 174 * @priv_dev: extended gadget object 175 * 176 * Before calling this function the appropriate endpoint must 177 * be selected by means of cdns3_select_ep function. 178 */ 179 static int cdns3_get_tdl(struct cdns3_device *priv_dev) 180 { 181 if (priv_dev->dev_ver < DEV_VER_V3) 182 return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd)); 183 else 184 return readl(&priv_dev->regs->ep_tdl); 185 } 186 187 dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep, 188 struct cdns3_trb *trb) 189 { 190 u32 offset = (char *)trb - (char *)priv_ep->trb_pool; 191 192 return priv_ep->trb_pool_dma + offset; 193 } 194 195 static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep) 196 { 197 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 198 199 if (priv_ep->trb_pool) { 200 dma_pool_free(priv_dev->eps_dma_pool, 201 priv_ep->trb_pool, priv_ep->trb_pool_dma); 202 priv_ep->trb_pool = NULL; 203 } 204 } 205 206 /** 207 * cdns3_allocate_trb_pool - Allocates TRB's pool for selected endpoint 208 * @priv_ep: endpoint object 209 * 210 * Function will return 0 on success or -ENOMEM on allocation error 211 */ 212 int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) 213 { 214 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 215 int ring_size = TRB_RING_SIZE; 216 int num_trbs = ring_size / TRB_SIZE; 217 struct cdns3_trb *link_trb; 218 219 if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size) 220 cdns3_free_trb_pool(priv_ep); 221 222 if (!priv_ep->trb_pool) { 223 priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool, 224 GFP_ATOMIC, 225 &priv_ep->trb_pool_dma); 226 227 if (!priv_ep->trb_pool) 228 return -ENOMEM; 229 230 priv_ep->alloc_ring_size = ring_size; 231 } 232 233 memset(priv_ep->trb_pool, 0, ring_size); 234 235 priv_ep->num_trbs = num_trbs; 236 237 if (!priv_ep->num) 238 return 0; 239 240 /* Initialize the last TRB as Link TRB */ 241 link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1)); 242 243 if (priv_ep->use_streams) { 244 /* 245 * For stream capable endpoints driver use single correct TRB. 246 * The last trb has zeroed cycle bit 247 */ 248 link_trb->control = 0; 249 } else { 250 link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma)); 251 link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE); 252 } 253 return 0; 254 } 255 256 /** 257 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint 258 * @priv_ep: endpoint object 259 * 260 * Endpoint must be selected before call to this function 261 */ 262 static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep) 263 { 264 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 265 int val; 266 267 trace_cdns3_halt(priv_ep, 1, 1); 268 269 writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL, 270 &priv_dev->regs->ep_cmd); 271 272 /* wait for DFLUSH cleared */ 273 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 274 !(val & EP_CMD_DFLUSH), 1, 1000); 275 priv_ep->flags |= EP_STALLED; 276 priv_ep->flags &= ~EP_STALL_PENDING; 277 } 278 279 /** 280 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller. 281 * @priv_dev: extended gadget object 282 */ 283 void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev) 284 { 285 int i; 286 287 writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf); 288 289 cdns3_allow_enable_l1(priv_dev, 0); 290 priv_dev->hw_configured_flag = 0; 291 priv_dev->onchip_used_size = 0; 292 priv_dev->out_mem_is_allocated = 0; 293 priv_dev->wait_for_setup = 0; 294 priv_dev->using_streams = 0; 295 296 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) 297 if (priv_dev->eps[i]) 298 priv_dev->eps[i]->flags &= ~EP_CONFIGURED; 299 } 300 301 /** 302 * cdns3_ep_inc_trb - increment a trb index. 303 * @index: Pointer to the TRB index to increment. 304 * @cs: Cycle state 305 * @trb_in_seg: number of TRBs in segment 306 * 307 * The index should never point to the link TRB. After incrementing, 308 * if it is point to the link TRB, wrap around to the beginning and revert 309 * cycle state bit The 310 * link TRB is always at the last TRB entry. 311 */ 312 static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg) 313 { 314 (*index)++; 315 if (*index == (trb_in_seg - 1)) { 316 *index = 0; 317 *cs ^= 1; 318 } 319 } 320 321 /** 322 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer 323 * @priv_ep: The endpoint whose enqueue pointer we're incrementing 324 */ 325 static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep) 326 { 327 priv_ep->free_trbs--; 328 cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs); 329 } 330 331 /** 332 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer 333 * @priv_ep: The endpoint whose dequeue pointer we're incrementing 334 */ 335 static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep) 336 { 337 priv_ep->free_trbs++; 338 cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs); 339 } 340 341 /** 342 * cdns3_allow_enable_l1 - enable/disable permits to transition to L1. 343 * @priv_dev: Extended gadget object 344 * @enable: Enable/disable permit to transition to L1. 345 * 346 * If bit USB_CONF_L1EN is set and device receive Extended Token packet, 347 * then controller answer with ACK handshake. 348 * If bit USB_CONF_L1DS is set and device receive Extended Token packet, 349 * then controller answer with NYET handshake. 350 */ 351 void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable) 352 { 353 if (enable) 354 writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf); 355 else 356 writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf); 357 } 358 359 enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev) 360 { 361 u32 reg; 362 363 reg = readl(&priv_dev->regs->usb_sts); 364 365 if (DEV_SUPERSPEED(reg)) 366 return USB_SPEED_SUPER; 367 else if (DEV_HIGHSPEED(reg)) 368 return USB_SPEED_HIGH; 369 else if (DEV_FULLSPEED(reg)) 370 return USB_SPEED_FULL; 371 else if (DEV_LOWSPEED(reg)) 372 return USB_SPEED_LOW; 373 return USB_SPEED_UNKNOWN; 374 } 375 376 /** 377 * cdns3_start_all_request - add to ring all request not started 378 * @priv_dev: Extended gadget object 379 * @priv_ep: The endpoint for whom request will be started. 380 * 381 * Returns return ENOMEM if transfer ring i not enough TRBs to start 382 * all requests. 383 */ 384 static int cdns3_start_all_request(struct cdns3_device *priv_dev, 385 struct cdns3_endpoint *priv_ep) 386 { 387 struct usb_request *request; 388 int ret = 0; 389 u8 pending_empty = list_empty(&priv_ep->pending_req_list); 390 391 /* 392 * If the last pending transfer is INTERNAL 393 * OR streams are enabled for this endpoint 394 * do NOT start new transfer till the last one is pending 395 */ 396 if (!pending_empty) { 397 struct cdns3_request *priv_req; 398 399 request = cdns3_next_request(&priv_ep->pending_req_list); 400 priv_req = to_cdns3_request(request); 401 if ((priv_req->flags & REQUEST_INTERNAL) || 402 (priv_ep->flags & EP_TDLCHK_EN) || 403 priv_ep->use_streams) { 404 dev_dbg(priv_dev->dev, "Blocking external request\n"); 405 return ret; 406 } 407 } 408 409 while (!list_empty(&priv_ep->deferred_req_list)) { 410 request = cdns3_next_request(&priv_ep->deferred_req_list); 411 412 if (!priv_ep->use_streams) { 413 ret = cdns3_ep_run_transfer(priv_ep, request); 414 } else { 415 priv_ep->stream_sg_idx = 0; 416 ret = cdns3_ep_run_stream_transfer(priv_ep, request); 417 } 418 if (ret) 419 return ret; 420 421 list_move_tail(&request->list, &priv_ep->pending_req_list); 422 if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN)) 423 break; 424 } 425 426 priv_ep->flags &= ~EP_RING_FULL; 427 return ret; 428 } 429 430 /* 431 * WA2: Set flag for all not ISOC OUT endpoints. If this flag is set 432 * driver try to detect whether endpoint need additional internal 433 * buffer for unblocking on-chip FIFO buffer. This flag will be cleared 434 * if before first DESCMISS interrupt the DMA will be armed. 435 */ 436 #define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \ 437 if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \ 438 priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \ 439 (reg) |= EP_STS_EN_DESCMISEN; \ 440 } } while (0) 441 442 static void __cdns3_descmiss_copy_data(struct usb_request *request, 443 struct usb_request *descmiss_req) 444 { 445 int length = request->actual + descmiss_req->actual; 446 struct scatterlist *s = request->sg; 447 448 if (!s) { 449 if (length <= request->length) { 450 memcpy(&((u8 *)request->buf)[request->actual], 451 descmiss_req->buf, 452 descmiss_req->actual); 453 request->actual = length; 454 } else { 455 /* It should never occures */ 456 request->status = -ENOMEM; 457 } 458 } else { 459 if (length <= sg_dma_len(s)) { 460 void *p = phys_to_virt(sg_dma_address(s)); 461 462 memcpy(&((u8 *)p)[request->actual], 463 descmiss_req->buf, 464 descmiss_req->actual); 465 request->actual = length; 466 } else { 467 request->status = -ENOMEM; 468 } 469 } 470 } 471 472 /** 473 * cdns3_wa2_descmiss_copy_data - copy data from internal requests to 474 * request queued by class driver. 475 * @priv_ep: extended endpoint object 476 * @request: request object 477 */ 478 static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep, 479 struct usb_request *request) 480 { 481 struct usb_request *descmiss_req; 482 struct cdns3_request *descmiss_priv_req; 483 484 while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { 485 int chunk_end; 486 487 descmiss_priv_req = 488 cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); 489 descmiss_req = &descmiss_priv_req->request; 490 491 /* driver can't touch pending request */ 492 if (descmiss_priv_req->flags & REQUEST_PENDING) 493 break; 494 495 chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH; 496 request->status = descmiss_req->status; 497 __cdns3_descmiss_copy_data(request, descmiss_req); 498 list_del_init(&descmiss_priv_req->list); 499 kfree(descmiss_req->buf); 500 cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req); 501 --priv_ep->wa2_counter; 502 503 if (!chunk_end) 504 break; 505 } 506 } 507 508 static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev, 509 struct cdns3_endpoint *priv_ep, 510 struct cdns3_request *priv_req) 511 { 512 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN && 513 priv_req->flags & REQUEST_INTERNAL) { 514 struct usb_request *req; 515 516 req = cdns3_next_request(&priv_ep->deferred_req_list); 517 518 priv_ep->descmis_req = NULL; 519 520 if (!req) 521 return NULL; 522 523 /* unmap the gadget request before copying data */ 524 usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req, 525 priv_ep->dir); 526 527 cdns3_wa2_descmiss_copy_data(priv_ep, req); 528 if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) && 529 req->length != req->actual) { 530 /* wait for next part of transfer */ 531 /* re-map the gadget request buffer*/ 532 usb_gadget_map_request_by_dev(priv_dev->sysdev, req, 533 usb_endpoint_dir_in(priv_ep->endpoint.desc)); 534 return NULL; 535 } 536 537 if (req->status == -EINPROGRESS) 538 req->status = 0; 539 540 list_del_init(&req->list); 541 cdns3_start_all_request(priv_dev, priv_ep); 542 return req; 543 } 544 545 return &priv_req->request; 546 } 547 548 static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev, 549 struct cdns3_endpoint *priv_ep, 550 struct cdns3_request *priv_req) 551 { 552 int deferred = 0; 553 554 /* 555 * If transfer was queued before DESCMISS appear than we 556 * can disable handling of DESCMISS interrupt. Driver assumes that it 557 * can disable special treatment for this endpoint. 558 */ 559 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) { 560 u32 reg; 561 562 cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir); 563 priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET; 564 reg = readl(&priv_dev->regs->ep_sts_en); 565 reg &= ~EP_STS_EN_DESCMISEN; 566 trace_cdns3_wa2(priv_ep, "workaround disabled\n"); 567 writel(reg, &priv_dev->regs->ep_sts_en); 568 } 569 570 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) { 571 u8 pending_empty = list_empty(&priv_ep->pending_req_list); 572 u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list); 573 574 /* 575 * DESCMISS transfer has been finished, so data will be 576 * directly copied from internal allocated usb_request 577 * objects. 578 */ 579 if (pending_empty && !descmiss_empty && 580 !(priv_req->flags & REQUEST_INTERNAL)) { 581 cdns3_wa2_descmiss_copy_data(priv_ep, 582 &priv_req->request); 583 584 trace_cdns3_wa2(priv_ep, "get internal stored data"); 585 586 list_add_tail(&priv_req->request.list, 587 &priv_ep->pending_req_list); 588 cdns3_gadget_giveback(priv_ep, priv_req, 589 priv_req->request.status); 590 591 /* 592 * Intentionally driver returns positive value as 593 * correct value. It informs that transfer has 594 * been finished. 595 */ 596 return EINPROGRESS; 597 } 598 599 /* 600 * Driver will wait for completion DESCMISS transfer, 601 * before starts new, not DESCMISS transfer. 602 */ 603 if (!pending_empty && !descmiss_empty) { 604 trace_cdns3_wa2(priv_ep, "wait for pending transfer\n"); 605 deferred = 1; 606 } 607 608 if (priv_req->flags & REQUEST_INTERNAL) 609 list_add_tail(&priv_req->list, 610 &priv_ep->wa2_descmiss_req_list); 611 } 612 613 return deferred; 614 } 615 616 static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep) 617 { 618 struct cdns3_request *priv_req; 619 620 while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { 621 u8 chain; 622 623 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); 624 chain = !!(priv_req->flags & REQUEST_INTERNAL_CH); 625 626 trace_cdns3_wa2(priv_ep, "removes eldest request"); 627 628 kfree(priv_req->request.buf); 629 list_del_init(&priv_req->list); 630 cdns3_gadget_ep_free_request(&priv_ep->endpoint, 631 &priv_req->request); 632 --priv_ep->wa2_counter; 633 634 if (!chain) 635 break; 636 } 637 } 638 639 /** 640 * cdns3_wa2_descmissing_packet - handles descriptor missing event. 641 * @priv_ep: extended gadget object 642 * 643 * This function is used only for WA2. For more information see Work around 2 644 * description. 645 */ 646 static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep) 647 { 648 struct cdns3_request *priv_req; 649 struct usb_request *request; 650 u8 pending_empty = list_empty(&priv_ep->pending_req_list); 651 652 /* check for pending transfer */ 653 if (!pending_empty) { 654 trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n"); 655 return; 656 } 657 658 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) { 659 priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET; 660 priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN; 661 } 662 663 trace_cdns3_wa2(priv_ep, "Description Missing detected\n"); 664 665 if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) { 666 trace_cdns3_wa2(priv_ep, "WA2 overflow\n"); 667 cdns3_wa2_remove_old_request(priv_ep); 668 } 669 670 request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint, 671 GFP_ATOMIC); 672 if (!request) 673 goto err; 674 675 priv_req = to_cdns3_request(request); 676 priv_req->flags |= REQUEST_INTERNAL; 677 678 /* if this field is still assigned it indicate that transfer related 679 * with this request has not been finished yet. Driver in this 680 * case simply allocate next request and assign flag REQUEST_INTERNAL_CH 681 * flag to previous one. It will indicate that current request is 682 * part of the previous one. 683 */ 684 if (priv_ep->descmis_req) 685 priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH; 686 687 priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE, 688 GFP_ATOMIC); 689 priv_ep->wa2_counter++; 690 691 if (!priv_req->request.buf) { 692 cdns3_gadget_ep_free_request(&priv_ep->endpoint, request); 693 goto err; 694 } 695 696 priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE; 697 priv_ep->descmis_req = priv_req; 698 699 __cdns3_gadget_ep_queue(&priv_ep->endpoint, 700 &priv_ep->descmis_req->request, 701 GFP_ATOMIC); 702 703 return; 704 705 err: 706 dev_err(priv_ep->cdns3_dev->dev, 707 "Failed: No sufficient memory for DESCMIS\n"); 708 } 709 710 static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev) 711 { 712 u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd)); 713 714 if (tdl) { 715 u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl; 716 717 writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL, 718 &priv_dev->regs->ep_cmd); 719 } 720 } 721 722 static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev) 723 { 724 u32 ep_sts_reg; 725 726 /* select EP0-out */ 727 cdns3_select_ep(priv_dev, 0); 728 729 ep_sts_reg = readl(&priv_dev->regs->ep_sts); 730 731 if (EP_STS_OUTQ_VAL(ep_sts_reg)) { 732 u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg); 733 struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num]; 734 735 if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) && 736 outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) { 737 u8 pending_empty = list_empty(&outq_ep->pending_req_list); 738 739 if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) || 740 (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) || 741 !pending_empty) { 742 } else { 743 u32 ep_sts_en_reg; 744 u32 ep_cmd_reg; 745 746 cdns3_select_ep(priv_dev, outq_ep->num | 747 outq_ep->dir); 748 ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en); 749 ep_cmd_reg = readl(&priv_dev->regs->ep_cmd); 750 751 outq_ep->flags |= EP_TDLCHK_EN; 752 cdns3_set_register_bit(&priv_dev->regs->ep_cfg, 753 EP_CFG_TDL_CHK); 754 755 cdns3_wa2_enable_detection(priv_dev, outq_ep, 756 ep_sts_en_reg); 757 writel(ep_sts_en_reg, 758 &priv_dev->regs->ep_sts_en); 759 /* reset tdl value to zero */ 760 cdns3_wa2_reset_tdl(priv_dev); 761 /* 762 * Memory barrier - Reset tdl before ringing the 763 * doorbell. 764 */ 765 wmb(); 766 if (EP_CMD_DRDY & ep_cmd_reg) { 767 trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n"); 768 769 } else { 770 trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n"); 771 /* 772 * ring doorbell to generate DESCMIS irq 773 */ 774 writel(EP_CMD_DRDY, 775 &priv_dev->regs->ep_cmd); 776 } 777 } 778 } 779 } 780 } 781 782 /** 783 * cdns3_gadget_giveback - call struct usb_request's ->complete callback 784 * @priv_ep: The endpoint to whom the request belongs to 785 * @priv_req: The request we're giving back 786 * @status: completion code for the request 787 * 788 * Must be called with controller's lock held and interrupts disabled. This 789 * function will unmap @req and call its ->complete() callback to notify upper 790 * layers that it has completed. 791 */ 792 void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, 793 struct cdns3_request *priv_req, 794 int status) 795 { 796 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 797 struct usb_request *request = &priv_req->request; 798 799 list_del_init(&request->list); 800 801 if (request->status == -EINPROGRESS) 802 request->status = status; 803 804 if (likely(!(priv_req->flags & REQUEST_UNALIGNED))) 805 usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request, 806 priv_ep->dir); 807 808 if ((priv_req->flags & REQUEST_UNALIGNED) && 809 priv_ep->dir == USB_DIR_OUT && !request->status) { 810 /* Make DMA buffer CPU accessible */ 811 dma_sync_single_for_cpu(priv_dev->sysdev, 812 priv_req->aligned_buf->dma, 813 request->actual, 814 priv_req->aligned_buf->dir); 815 memcpy(request->buf, priv_req->aligned_buf->buf, 816 request->actual); 817 } 818 819 priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED); 820 /* All TRBs have finished, clear the counter */ 821 priv_req->finished_trb = 0; 822 trace_cdns3_gadget_giveback(priv_req); 823 824 if (priv_dev->dev_ver < DEV_VER_V2) { 825 request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep, 826 priv_req); 827 if (!request) 828 return; 829 } 830 831 if (request->complete) { 832 spin_unlock(&priv_dev->lock); 833 usb_gadget_giveback_request(&priv_ep->endpoint, 834 request); 835 spin_lock(&priv_dev->lock); 836 } 837 838 if (request->buf == priv_dev->zlp_buf) 839 cdns3_gadget_ep_free_request(&priv_ep->endpoint, request); 840 } 841 842 static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep) 843 { 844 /* Work around for stale data address in TRB*/ 845 if (priv_ep->wa1_set) { 846 trace_cdns3_wa1(priv_ep, "restore cycle bit"); 847 848 priv_ep->wa1_set = 0; 849 priv_ep->wa1_trb_index = 0xFFFF; 850 if (priv_ep->wa1_cycle_bit) { 851 priv_ep->wa1_trb->control = 852 priv_ep->wa1_trb->control | cpu_to_le32(0x1); 853 } else { 854 priv_ep->wa1_trb->control = 855 priv_ep->wa1_trb->control & cpu_to_le32(~0x1); 856 } 857 } 858 } 859 860 static void cdns3_free_aligned_request_buf(struct work_struct *work) 861 { 862 struct cdns3_device *priv_dev = container_of(work, struct cdns3_device, 863 aligned_buf_wq); 864 struct cdns3_aligned_buf *buf, *tmp; 865 unsigned long flags; 866 867 spin_lock_irqsave(&priv_dev->lock, flags); 868 869 list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) { 870 if (!buf->in_use) { 871 list_del(&buf->list); 872 873 /* 874 * Re-enable interrupts to free DMA capable memory. 875 * Driver can't free this memory with disabled 876 * interrupts. 877 */ 878 spin_unlock_irqrestore(&priv_dev->lock, flags); 879 dma_free_noncoherent(priv_dev->sysdev, buf->size, 880 buf->buf, buf->dma, buf->dir); 881 kfree(buf); 882 spin_lock_irqsave(&priv_dev->lock, flags); 883 } 884 } 885 886 spin_unlock_irqrestore(&priv_dev->lock, flags); 887 } 888 889 static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req) 890 { 891 struct cdns3_endpoint *priv_ep = priv_req->priv_ep; 892 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 893 struct cdns3_aligned_buf *buf; 894 895 /* check if buffer is aligned to 8. */ 896 if (!((uintptr_t)priv_req->request.buf & 0x7)) 897 return 0; 898 899 buf = priv_req->aligned_buf; 900 901 if (!buf || priv_req->request.length > buf->size) { 902 buf = kzalloc(sizeof(*buf), GFP_ATOMIC); 903 if (!buf) 904 return -ENOMEM; 905 906 buf->size = priv_req->request.length; 907 buf->dir = usb_endpoint_dir_in(priv_ep->endpoint.desc) ? 908 DMA_TO_DEVICE : DMA_FROM_DEVICE; 909 910 buf->buf = dma_alloc_noncoherent(priv_dev->sysdev, 911 buf->size, 912 &buf->dma, 913 buf->dir, 914 GFP_ATOMIC); 915 if (!buf->buf) { 916 kfree(buf); 917 return -ENOMEM; 918 } 919 920 if (priv_req->aligned_buf) { 921 trace_cdns3_free_aligned_request(priv_req); 922 priv_req->aligned_buf->in_use = 0; 923 queue_work(system_freezable_wq, 924 &priv_dev->aligned_buf_wq); 925 } 926 927 buf->in_use = 1; 928 priv_req->aligned_buf = buf; 929 930 list_add_tail(&buf->list, 931 &priv_dev->aligned_buf_list); 932 } 933 934 if (priv_ep->dir == USB_DIR_IN) { 935 /* Make DMA buffer CPU accessible */ 936 dma_sync_single_for_cpu(priv_dev->sysdev, 937 buf->dma, buf->size, buf->dir); 938 memcpy(buf->buf, priv_req->request.buf, 939 priv_req->request.length); 940 } 941 942 /* Transfer DMA buffer ownership back to device */ 943 dma_sync_single_for_device(priv_dev->sysdev, 944 buf->dma, buf->size, buf->dir); 945 946 priv_req->flags |= REQUEST_UNALIGNED; 947 trace_cdns3_prepare_aligned_request(priv_req); 948 949 return 0; 950 } 951 952 static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep, 953 struct cdns3_trb *trb) 954 { 955 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 956 957 if (!priv_ep->wa1_set) { 958 u32 doorbell; 959 960 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); 961 962 if (doorbell) { 963 priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0; 964 priv_ep->wa1_set = 1; 965 priv_ep->wa1_trb = trb; 966 priv_ep->wa1_trb_index = priv_ep->enqueue; 967 trace_cdns3_wa1(priv_ep, "set guard"); 968 return 0; 969 } 970 } 971 return 1; 972 } 973 974 static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev, 975 struct cdns3_endpoint *priv_ep) 976 { 977 int dma_index; 978 u32 doorbell; 979 980 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); 981 dma_index = cdns3_get_dma_pos(priv_dev, priv_ep); 982 983 if (!doorbell || dma_index != priv_ep->wa1_trb_index) 984 cdns3_wa1_restore_cycle_bit(priv_ep); 985 } 986 987 static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, 988 struct usb_request *request) 989 { 990 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 991 struct cdns3_request *priv_req; 992 struct cdns3_trb *trb; 993 dma_addr_t trb_dma; 994 int address; 995 u32 control; 996 u32 length; 997 u32 tdl; 998 unsigned int sg_idx = priv_ep->stream_sg_idx; 999 1000 priv_req = to_cdns3_request(request); 1001 address = priv_ep->endpoint.desc->bEndpointAddress; 1002 1003 priv_ep->flags |= EP_PENDING_REQUEST; 1004 1005 /* must allocate buffer aligned to 8 */ 1006 if (priv_req->flags & REQUEST_UNALIGNED) 1007 trb_dma = priv_req->aligned_buf->dma; 1008 else 1009 trb_dma = request->dma; 1010 1011 /* For stream capable endpoints driver use only single TD. */ 1012 trb = priv_ep->trb_pool + priv_ep->enqueue; 1013 priv_req->start_trb = priv_ep->enqueue; 1014 priv_req->end_trb = priv_req->start_trb; 1015 priv_req->trb = trb; 1016 1017 cdns3_select_ep(priv_ep->cdns3_dev, address); 1018 1019 control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE | 1020 TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP; 1021 1022 if (!request->num_sgs) { 1023 trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma)); 1024 length = request->length; 1025 } else { 1026 trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address)); 1027 length = request->sg[sg_idx].length; 1028 } 1029 1030 tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); 1031 1032 trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length)); 1033 1034 /* 1035 * For DEV_VER_V2 controller version we have enabled 1036 * USB_CONF2_EN_TDL_TRB in DMULT configuration. 1037 * This enables TDL calculation based on TRB, hence setting TDL in TRB. 1038 */ 1039 if (priv_dev->dev_ver >= DEV_VER_V2) { 1040 if (priv_dev->gadget.speed == USB_SPEED_SUPER) 1041 trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl)); 1042 } 1043 priv_req->flags |= REQUEST_PENDING; 1044 1045 trb->control = cpu_to_le32(control); 1046 1047 trace_cdns3_prepare_trb(priv_ep, priv_req->trb); 1048 1049 /* 1050 * Memory barrier - Cycle Bit must be set before trb->length and 1051 * trb->buffer fields. 1052 */ 1053 wmb(); 1054 1055 /* always first element */ 1056 writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), 1057 &priv_dev->regs->ep_traddr); 1058 1059 if (!(priv_ep->flags & EP_STALLED)) { 1060 trace_cdns3_ring(priv_ep); 1061 /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/ 1062 writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts); 1063 1064 priv_ep->prime_flag = false; 1065 1066 /* 1067 * Controller version DEV_VER_V2 tdl calculation 1068 * is based on TRB 1069 */ 1070 1071 if (priv_dev->dev_ver < DEV_VER_V2) 1072 writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, 1073 &priv_dev->regs->ep_cmd); 1074 else if (priv_dev->dev_ver > DEV_VER_V2) 1075 writel(tdl, &priv_dev->regs->ep_tdl); 1076 1077 priv_ep->last_stream_id = priv_req->request.stream_id; 1078 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); 1079 writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) | 1080 EP_CMD_ERDY, &priv_dev->regs->ep_cmd); 1081 1082 trace_cdns3_doorbell_epx(priv_ep->name, 1083 readl(&priv_dev->regs->ep_traddr)); 1084 } 1085 1086 /* WORKAROUND for transition to L0 */ 1087 __cdns3_gadget_wakeup(priv_dev); 1088 1089 return 0; 1090 } 1091 1092 static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep) 1093 { 1094 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 1095 1096 if (priv_dev->dev_ver < DEV_VER_V3) 1097 return; 1098 1099 if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) { 1100 writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts); 1101 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); 1102 } 1103 } 1104 1105 /** 1106 * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware 1107 * @priv_ep: endpoint object 1108 * @request: request object 1109 * 1110 * Returns zero on success or negative value on failure 1111 */ 1112 static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, 1113 struct usb_request *request) 1114 { 1115 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 1116 struct cdns3_request *priv_req; 1117 struct cdns3_trb *trb; 1118 struct cdns3_trb *link_trb = NULL; 1119 dma_addr_t trb_dma; 1120 u32 togle_pcs = 1; 1121 int sg_iter = 0; 1122 int num_trb; 1123 int address; 1124 u32 control; 1125 int pcs; 1126 u16 total_tdl = 0; 1127 struct scatterlist *s = NULL; 1128 bool sg_supported = !!(request->num_mapped_sgs); 1129 1130 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) 1131 num_trb = priv_ep->interval; 1132 else 1133 num_trb = sg_supported ? request->num_mapped_sgs : 1; 1134 1135 if (num_trb > priv_ep->free_trbs) { 1136 priv_ep->flags |= EP_RING_FULL; 1137 return -ENOBUFS; 1138 } 1139 1140 priv_req = to_cdns3_request(request); 1141 address = priv_ep->endpoint.desc->bEndpointAddress; 1142 1143 priv_ep->flags |= EP_PENDING_REQUEST; 1144 1145 /* must allocate buffer aligned to 8 */ 1146 if (priv_req->flags & REQUEST_UNALIGNED) 1147 trb_dma = priv_req->aligned_buf->dma; 1148 else 1149 trb_dma = request->dma; 1150 1151 trb = priv_ep->trb_pool + priv_ep->enqueue; 1152 priv_req->start_trb = priv_ep->enqueue; 1153 priv_req->trb = trb; 1154 1155 cdns3_select_ep(priv_ep->cdns3_dev, address); 1156 1157 /* prepare ring */ 1158 if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) { 1159 int doorbell, dma_index; 1160 u32 ch_bit = 0; 1161 1162 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); 1163 dma_index = cdns3_get_dma_pos(priv_dev, priv_ep); 1164 1165 /* Driver can't update LINK TRB if it is current processed. */ 1166 if (doorbell && dma_index == priv_ep->num_trbs - 1) { 1167 priv_ep->flags |= EP_DEFERRED_DRDY; 1168 return -ENOBUFS; 1169 } 1170 1171 /*updating C bt in Link TRB before starting DMA*/ 1172 link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1); 1173 /* 1174 * For TRs size equal 2 enabling TRB_CHAIN for epXin causes 1175 * that DMA stuck at the LINK TRB. 1176 * On the other hand, removing TRB_CHAIN for longer TRs for 1177 * epXout cause that DMA stuck after handling LINK TRB. 1178 * To eliminate this strange behavioral driver set TRB_CHAIN 1179 * bit only for TR size > 2. 1180 */ 1181 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC || 1182 TRBS_PER_SEGMENT > 2) 1183 ch_bit = TRB_CHAIN; 1184 1185 link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) | 1186 TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit); 1187 } 1188 1189 if (priv_dev->dev_ver <= DEV_VER_V2) 1190 togle_pcs = cdns3_wa1_update_guard(priv_ep, trb); 1191 1192 if (sg_supported) 1193 s = request->sg; 1194 1195 /* set incorrect Cycle Bit for first trb*/ 1196 control = priv_ep->pcs ? 0 : TRB_CYCLE; 1197 trb->length = 0; 1198 if (priv_dev->dev_ver >= DEV_VER_V2) { 1199 u16 td_size; 1200 1201 td_size = DIV_ROUND_UP(request->length, 1202 priv_ep->endpoint.maxpacket); 1203 if (priv_dev->gadget.speed == USB_SPEED_SUPER) 1204 trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size)); 1205 else 1206 control |= TRB_TDL_HS_SIZE(td_size); 1207 } 1208 1209 do { 1210 u32 length; 1211 1212 /* fill TRB */ 1213 control |= TRB_TYPE(TRB_NORMAL); 1214 if (sg_supported) { 1215 trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s))); 1216 length = sg_dma_len(s); 1217 } else { 1218 trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma)); 1219 length = request->length; 1220 } 1221 1222 if (priv_ep->flags & EP_TDLCHK_EN) 1223 total_tdl += DIV_ROUND_UP(length, 1224 priv_ep->endpoint.maxpacket); 1225 1226 trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) | 1227 TRB_LEN(length)); 1228 pcs = priv_ep->pcs ? TRB_CYCLE : 0; 1229 1230 /* 1231 * first trb should be prepared as last to avoid processing 1232 * transfer to early 1233 */ 1234 if (sg_iter != 0) 1235 control |= pcs; 1236 1237 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) { 1238 control |= TRB_IOC | TRB_ISP; 1239 } else { 1240 /* for last element in TD or in SG list */ 1241 if (sg_iter == (num_trb - 1) && sg_iter != 0) 1242 control |= pcs | TRB_IOC | TRB_ISP; 1243 } 1244 1245 if (sg_iter) 1246 trb->control = cpu_to_le32(control); 1247 else 1248 priv_req->trb->control = cpu_to_le32(control); 1249 1250 if (sg_supported) { 1251 trb->control |= cpu_to_le32(TRB_ISP); 1252 /* Don't set chain bit for last TRB */ 1253 if (sg_iter < num_trb - 1) 1254 trb->control |= cpu_to_le32(TRB_CHAIN); 1255 1256 s = sg_next(s); 1257 } 1258 1259 control = 0; 1260 ++sg_iter; 1261 priv_req->end_trb = priv_ep->enqueue; 1262 cdns3_ep_inc_enq(priv_ep); 1263 trb = priv_ep->trb_pool + priv_ep->enqueue; 1264 trb->length = 0; 1265 } while (sg_iter < num_trb); 1266 1267 trb = priv_req->trb; 1268 1269 priv_req->flags |= REQUEST_PENDING; 1270 priv_req->num_of_trb = num_trb; 1271 1272 if (sg_iter == 1) 1273 trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP); 1274 1275 if (priv_dev->dev_ver < DEV_VER_V2 && 1276 (priv_ep->flags & EP_TDLCHK_EN)) { 1277 u16 tdl = total_tdl; 1278 u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd)); 1279 1280 if (tdl > EP_CMD_TDL_MAX) { 1281 tdl = EP_CMD_TDL_MAX; 1282 priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX; 1283 } 1284 1285 if (old_tdl < tdl) { 1286 tdl -= old_tdl; 1287 writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, 1288 &priv_dev->regs->ep_cmd); 1289 } 1290 } 1291 1292 /* 1293 * Memory barrier - cycle bit must be set before other filds in trb. 1294 */ 1295 wmb(); 1296 1297 /* give the TD to the consumer*/ 1298 if (togle_pcs) 1299 trb->control = trb->control ^ cpu_to_le32(1); 1300 1301 if (priv_dev->dev_ver <= DEV_VER_V2) 1302 cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep); 1303 1304 if (num_trb > 1) { 1305 int i = 0; 1306 1307 while (i < num_trb) { 1308 trace_cdns3_prepare_trb(priv_ep, trb + i); 1309 if (trb + i == link_trb) { 1310 trb = priv_ep->trb_pool; 1311 num_trb = num_trb - i; 1312 i = 0; 1313 } else { 1314 i++; 1315 } 1316 } 1317 } else { 1318 trace_cdns3_prepare_trb(priv_ep, priv_req->trb); 1319 } 1320 1321 /* 1322 * Memory barrier - Cycle Bit must be set before trb->length and 1323 * trb->buffer fields. 1324 */ 1325 wmb(); 1326 1327 /* 1328 * For DMULT mode we can set address to transfer ring only once after 1329 * enabling endpoint. 1330 */ 1331 if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) { 1332 /* 1333 * Until SW is not ready to handle the OUT transfer the ISO OUT 1334 * Endpoint should be disabled (EP_CFG.ENABLE = 0). 1335 * EP_CFG_ENABLE must be set before updating ep_traddr. 1336 */ 1337 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir && 1338 !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) { 1339 priv_ep->flags |= EP_QUIRK_ISO_OUT_EN; 1340 cdns3_set_register_bit(&priv_dev->regs->ep_cfg, 1341 EP_CFG_ENABLE); 1342 } 1343 1344 writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma + 1345 priv_req->start_trb * TRB_SIZE), 1346 &priv_dev->regs->ep_traddr); 1347 1348 priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR; 1349 } 1350 1351 if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) { 1352 trace_cdns3_ring(priv_ep); 1353 /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/ 1354 writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts); 1355 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); 1356 cdns3_rearm_drdy_if_needed(priv_ep); 1357 trace_cdns3_doorbell_epx(priv_ep->name, 1358 readl(&priv_dev->regs->ep_traddr)); 1359 } 1360 1361 /* WORKAROUND for transition to L0 */ 1362 __cdns3_gadget_wakeup(priv_dev); 1363 1364 return 0; 1365 } 1366 1367 void cdns3_set_hw_configuration(struct cdns3_device *priv_dev) 1368 { 1369 struct cdns3_endpoint *priv_ep; 1370 struct usb_ep *ep; 1371 1372 if (priv_dev->hw_configured_flag) 1373 return; 1374 1375 writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf); 1376 1377 cdns3_set_register_bit(&priv_dev->regs->usb_conf, 1378 USB_CONF_U1EN | USB_CONF_U2EN); 1379 1380 priv_dev->hw_configured_flag = 1; 1381 1382 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { 1383 if (ep->enabled) { 1384 priv_ep = ep_to_cdns3_ep(ep); 1385 cdns3_start_all_request(priv_dev, priv_ep); 1386 } 1387 } 1388 1389 cdns3_allow_enable_l1(priv_dev, 1); 1390 } 1391 1392 /** 1393 * cdns3_trb_handled - check whether trb has been handled by DMA 1394 * 1395 * @priv_ep: extended endpoint object. 1396 * @priv_req: request object for checking 1397 * 1398 * Endpoint must be selected before invoking this function. 1399 * 1400 * Returns false if request has not been handled by DMA, else returns true. 1401 * 1402 * SR - start ring 1403 * ER - end ring 1404 * DQ = priv_ep->dequeue - dequeue position 1405 * EQ = priv_ep->enqueue - enqueue position 1406 * ST = priv_req->start_trb - index of first TRB in transfer ring 1407 * ET = priv_req->end_trb - index of last TRB in transfer ring 1408 * CI = current_index - index of processed TRB by DMA. 1409 * 1410 * As first step, we check if the TRB between the ST and ET. 1411 * Then, we check if cycle bit for index priv_ep->dequeue 1412 * is correct. 1413 * 1414 * some rules: 1415 * 1. priv_ep->dequeue never equals to current_index. 1416 * 2 priv_ep->enqueue never exceed priv_ep->dequeue 1417 * 3. exception: priv_ep->enqueue == priv_ep->dequeue 1418 * and priv_ep->free_trbs is zero. 1419 * This case indicate that TR is full. 1420 * 1421 * At below two cases, the request have been handled. 1422 * Case 1 - priv_ep->dequeue < current_index 1423 * SR ... EQ ... DQ ... CI ... ER 1424 * SR ... DQ ... CI ... EQ ... ER 1425 * 1426 * Case 2 - priv_ep->dequeue > current_index 1427 * This situation takes place when CI go through the LINK TRB at the end of 1428 * transfer ring. 1429 * SR ... CI ... EQ ... DQ ... ER 1430 */ 1431 static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep, 1432 struct cdns3_request *priv_req) 1433 { 1434 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 1435 struct cdns3_trb *trb; 1436 int current_index = 0; 1437 int handled = 0; 1438 int doorbell; 1439 1440 current_index = cdns3_get_dma_pos(priv_dev, priv_ep); 1441 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); 1442 1443 /* current trb doesn't belong to this request */ 1444 if (priv_req->start_trb < priv_req->end_trb) { 1445 if (priv_ep->dequeue > priv_req->end_trb) 1446 goto finish; 1447 1448 if (priv_ep->dequeue < priv_req->start_trb) 1449 goto finish; 1450 } 1451 1452 if ((priv_req->start_trb > priv_req->end_trb) && 1453 (priv_ep->dequeue > priv_req->end_trb) && 1454 (priv_ep->dequeue < priv_req->start_trb)) 1455 goto finish; 1456 1457 if ((priv_req->start_trb == priv_req->end_trb) && 1458 (priv_ep->dequeue != priv_req->end_trb)) 1459 goto finish; 1460 1461 trb = &priv_ep->trb_pool[priv_ep->dequeue]; 1462 1463 if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs) 1464 goto finish; 1465 1466 if (doorbell == 1 && current_index == priv_ep->dequeue) 1467 goto finish; 1468 1469 /* The corner case for TRBS_PER_SEGMENT equal 2). */ 1470 if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { 1471 handled = 1; 1472 goto finish; 1473 } 1474 1475 if (priv_ep->enqueue == priv_ep->dequeue && 1476 priv_ep->free_trbs == 0) { 1477 handled = 1; 1478 } else if (priv_ep->dequeue < current_index) { 1479 if ((current_index == (priv_ep->num_trbs - 1)) && 1480 !priv_ep->dequeue) 1481 goto finish; 1482 1483 handled = 1; 1484 } else if (priv_ep->dequeue > current_index) { 1485 handled = 1; 1486 } 1487 1488 finish: 1489 trace_cdns3_request_handled(priv_req, current_index, handled); 1490 1491 return handled; 1492 } 1493 1494 static void cdns3_transfer_completed(struct cdns3_device *priv_dev, 1495 struct cdns3_endpoint *priv_ep) 1496 { 1497 struct cdns3_request *priv_req; 1498 struct usb_request *request; 1499 struct cdns3_trb *trb; 1500 bool request_handled = false; 1501 bool transfer_end = false; 1502 1503 while (!list_empty(&priv_ep->pending_req_list)) { 1504 request = cdns3_next_request(&priv_ep->pending_req_list); 1505 priv_req = to_cdns3_request(request); 1506 1507 trb = priv_ep->trb_pool + priv_ep->dequeue; 1508 1509 /* The TRB was changed as link TRB, and the request was handled at ep_dequeue */ 1510 while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { 1511 trace_cdns3_complete_trb(priv_ep, trb); 1512 cdns3_ep_inc_deq(priv_ep); 1513 trb = priv_ep->trb_pool + priv_ep->dequeue; 1514 } 1515 1516 if (!request->stream_id) { 1517 /* Re-select endpoint. It could be changed by other CPU 1518 * during handling usb_gadget_giveback_request. 1519 */ 1520 cdns3_select_ep(priv_dev, priv_ep->endpoint.address); 1521 1522 while (cdns3_trb_handled(priv_ep, priv_req)) { 1523 priv_req->finished_trb++; 1524 if (priv_req->finished_trb >= priv_req->num_of_trb) 1525 request_handled = true; 1526 1527 trb = priv_ep->trb_pool + priv_ep->dequeue; 1528 trace_cdns3_complete_trb(priv_ep, trb); 1529 1530 if (!transfer_end) 1531 request->actual += 1532 TRB_LEN(le32_to_cpu(trb->length)); 1533 1534 if (priv_req->num_of_trb > 1 && 1535 le32_to_cpu(trb->control) & TRB_SMM && 1536 le32_to_cpu(trb->control) & TRB_CHAIN) 1537 transfer_end = true; 1538 1539 cdns3_ep_inc_deq(priv_ep); 1540 } 1541 1542 if (request_handled) { 1543 cdns3_gadget_giveback(priv_ep, priv_req, 0); 1544 request_handled = false; 1545 transfer_end = false; 1546 } else { 1547 goto prepare_next_td; 1548 } 1549 1550 if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && 1551 TRBS_PER_SEGMENT == 2) 1552 break; 1553 } else { 1554 /* Re-select endpoint. It could be changed by other CPU 1555 * during handling usb_gadget_giveback_request. 1556 */ 1557 cdns3_select_ep(priv_dev, priv_ep->endpoint.address); 1558 1559 trb = priv_ep->trb_pool; 1560 trace_cdns3_complete_trb(priv_ep, trb); 1561 1562 if (trb != priv_req->trb) 1563 dev_warn(priv_dev->dev, 1564 "request_trb=0x%p, queue_trb=0x%p\n", 1565 priv_req->trb, trb); 1566 1567 request->actual += TRB_LEN(le32_to_cpu(trb->length)); 1568 1569 if (!request->num_sgs || 1570 (request->num_sgs == (priv_ep->stream_sg_idx + 1))) { 1571 priv_ep->stream_sg_idx = 0; 1572 cdns3_gadget_giveback(priv_ep, priv_req, 0); 1573 } else { 1574 priv_ep->stream_sg_idx++; 1575 cdns3_ep_run_stream_transfer(priv_ep, request); 1576 } 1577 break; 1578 } 1579 } 1580 priv_ep->flags &= ~EP_PENDING_REQUEST; 1581 1582 prepare_next_td: 1583 if (!(priv_ep->flags & EP_STALLED) && 1584 !(priv_ep->flags & EP_STALL_PENDING)) 1585 cdns3_start_all_request(priv_dev, priv_ep); 1586 } 1587 1588 void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm) 1589 { 1590 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 1591 1592 cdns3_wa1_restore_cycle_bit(priv_ep); 1593 1594 if (rearm) { 1595 trace_cdns3_ring(priv_ep); 1596 1597 /* Cycle Bit must be updated before arming DMA. */ 1598 wmb(); 1599 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); 1600 1601 __cdns3_gadget_wakeup(priv_dev); 1602 1603 trace_cdns3_doorbell_epx(priv_ep->name, 1604 readl(&priv_dev->regs->ep_traddr)); 1605 } 1606 } 1607 1608 static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep) 1609 { 1610 u16 tdl = priv_ep->pending_tdl; 1611 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 1612 1613 if (tdl > EP_CMD_TDL_MAX) { 1614 tdl = EP_CMD_TDL_MAX; 1615 priv_ep->pending_tdl -= EP_CMD_TDL_MAX; 1616 } else { 1617 priv_ep->pending_tdl = 0; 1618 } 1619 1620 writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd); 1621 } 1622 1623 /** 1624 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint 1625 * @priv_ep: endpoint object 1626 * 1627 * Returns 0 1628 */ 1629 static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) 1630 { 1631 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 1632 u32 ep_sts_reg; 1633 struct usb_request *deferred_request; 1634 struct usb_request *pending_request; 1635 u32 tdl = 0; 1636 1637 cdns3_select_ep(priv_dev, priv_ep->endpoint.address); 1638 1639 trace_cdns3_epx_irq(priv_dev, priv_ep); 1640 1641 ep_sts_reg = readl(&priv_dev->regs->ep_sts); 1642 writel(ep_sts_reg, &priv_dev->regs->ep_sts); 1643 1644 if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) { 1645 bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY); 1646 1647 tdl = cdns3_get_tdl(priv_dev); 1648 1649 /* 1650 * Continue the previous transfer: 1651 * There is some racing between ERDY and PRIME. The device send 1652 * ERDY and almost in the same time Host send PRIME. It cause 1653 * that host ignore the ERDY packet and driver has to send it 1654 * again. 1655 */ 1656 if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) || 1657 EP_STS_HOSTPP(ep_sts_reg))) { 1658 writel(EP_CMD_ERDY | 1659 EP_CMD_ERDY_SID(priv_ep->last_stream_id), 1660 &priv_dev->regs->ep_cmd); 1661 ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC); 1662 } else { 1663 priv_ep->prime_flag = true; 1664 1665 pending_request = cdns3_next_request(&priv_ep->pending_req_list); 1666 deferred_request = cdns3_next_request(&priv_ep->deferred_req_list); 1667 1668 if (deferred_request && !pending_request) { 1669 cdns3_start_all_request(priv_dev, priv_ep); 1670 } 1671 } 1672 } 1673 1674 if (ep_sts_reg & EP_STS_TRBERR) { 1675 if (priv_ep->flags & EP_STALL_PENDING && 1676 !(ep_sts_reg & EP_STS_DESCMIS && 1677 priv_dev->dev_ver < DEV_VER_V2)) { 1678 cdns3_ep_stall_flush(priv_ep); 1679 } 1680 1681 /* 1682 * For isochronous transfer driver completes request on 1683 * IOC or on TRBERR. IOC appears only when device receive 1684 * OUT data packet. If host disable stream or lost some packet 1685 * then the only way to finish all queued transfer is to do it 1686 * on TRBERR event. 1687 */ 1688 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && 1689 !priv_ep->wa1_set) { 1690 if (!priv_ep->dir) { 1691 u32 ep_cfg = readl(&priv_dev->regs->ep_cfg); 1692 1693 ep_cfg &= ~EP_CFG_ENABLE; 1694 writel(ep_cfg, &priv_dev->regs->ep_cfg); 1695 priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN; 1696 priv_ep->flags |= EP_UPDATE_EP_TRBADDR; 1697 } 1698 cdns3_transfer_completed(priv_dev, priv_ep); 1699 } else if (!(priv_ep->flags & EP_STALLED) && 1700 !(priv_ep->flags & EP_STALL_PENDING)) { 1701 if (priv_ep->flags & EP_DEFERRED_DRDY) { 1702 priv_ep->flags &= ~EP_DEFERRED_DRDY; 1703 cdns3_start_all_request(priv_dev, priv_ep); 1704 } else { 1705 cdns3_rearm_transfer(priv_ep, 1706 priv_ep->wa1_set); 1707 } 1708 } 1709 } 1710 1711 if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) || 1712 (ep_sts_reg & EP_STS_IOT)) { 1713 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) { 1714 if (ep_sts_reg & EP_STS_ISP) 1715 priv_ep->flags |= EP_QUIRK_END_TRANSFER; 1716 else 1717 priv_ep->flags &= ~EP_QUIRK_END_TRANSFER; 1718 } 1719 1720 if (!priv_ep->use_streams) { 1721 if ((ep_sts_reg & EP_STS_IOC) || 1722 (ep_sts_reg & EP_STS_ISP)) { 1723 cdns3_transfer_completed(priv_dev, priv_ep); 1724 } else if ((priv_ep->flags & EP_TDLCHK_EN) & 1725 priv_ep->pending_tdl) { 1726 /* handle IOT with pending tdl */ 1727 cdns3_reprogram_tdl(priv_ep); 1728 } 1729 } else if (priv_ep->dir == USB_DIR_OUT) { 1730 priv_ep->ep_sts_pending |= ep_sts_reg; 1731 } else if (ep_sts_reg & EP_STS_IOT) { 1732 cdns3_transfer_completed(priv_dev, priv_ep); 1733 } 1734 } 1735 1736 /* 1737 * MD_EXIT interrupt sets when stream capable endpoint exits 1738 * from MOVE DATA state of Bulk IN/OUT stream protocol state machine 1739 */ 1740 if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) && 1741 (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) { 1742 priv_ep->ep_sts_pending = 0; 1743 cdns3_transfer_completed(priv_dev, priv_ep); 1744 } 1745 1746 /* 1747 * WA2: this condition should only be meet when 1748 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or 1749 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN. 1750 * In other cases this interrupt will be disabled. 1751 */ 1752 if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 && 1753 !(priv_ep->flags & EP_STALLED)) 1754 cdns3_wa2_descmissing_packet(priv_ep); 1755 1756 return 0; 1757 } 1758 1759 static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev) 1760 { 1761 if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) 1762 priv_dev->gadget_driver->disconnect(&priv_dev->gadget); 1763 } 1764 1765 /** 1766 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device 1767 * @priv_dev: extended gadget object 1768 * @usb_ists: bitmap representation of device's reported interrupts 1769 * (usb_ists register value) 1770 */ 1771 static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, 1772 u32 usb_ists) 1773 __must_hold(&priv_dev->lock) 1774 { 1775 int speed = 0; 1776 1777 trace_cdns3_usb_irq(priv_dev, usb_ists); 1778 if (usb_ists & USB_ISTS_L1ENTI) { 1779 /* 1780 * WORKAROUND: CDNS3 controller has issue with hardware resuming 1781 * from L1. To fix it, if any DMA transfer is pending driver 1782 * must starts driving resume signal immediately. 1783 */ 1784 if (readl(&priv_dev->regs->drbl)) 1785 __cdns3_gadget_wakeup(priv_dev); 1786 } 1787 1788 /* Connection detected */ 1789 if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) { 1790 speed = cdns3_get_speed(priv_dev); 1791 priv_dev->gadget.speed = speed; 1792 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED); 1793 cdns3_ep0_config(priv_dev); 1794 } 1795 1796 /* Disconnection detected */ 1797 if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) { 1798 spin_unlock(&priv_dev->lock); 1799 cdns3_disconnect_gadget(priv_dev); 1800 spin_lock(&priv_dev->lock); 1801 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 1802 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); 1803 cdns3_hw_reset_eps_config(priv_dev); 1804 } 1805 1806 if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) { 1807 if (priv_dev->gadget_driver && 1808 priv_dev->gadget_driver->suspend) { 1809 spin_unlock(&priv_dev->lock); 1810 priv_dev->gadget_driver->suspend(&priv_dev->gadget); 1811 spin_lock(&priv_dev->lock); 1812 } 1813 } 1814 1815 if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) { 1816 if (priv_dev->gadget_driver && 1817 priv_dev->gadget_driver->resume) { 1818 spin_unlock(&priv_dev->lock); 1819 priv_dev->gadget_driver->resume(&priv_dev->gadget); 1820 spin_lock(&priv_dev->lock); 1821 } 1822 } 1823 1824 /* reset*/ 1825 if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) { 1826 if (priv_dev->gadget_driver) { 1827 spin_unlock(&priv_dev->lock); 1828 usb_gadget_udc_reset(&priv_dev->gadget, 1829 priv_dev->gadget_driver); 1830 spin_lock(&priv_dev->lock); 1831 1832 /*read again to check the actual speed*/ 1833 speed = cdns3_get_speed(priv_dev); 1834 priv_dev->gadget.speed = speed; 1835 cdns3_hw_reset_eps_config(priv_dev); 1836 cdns3_ep0_config(priv_dev); 1837 } 1838 } 1839 } 1840 1841 /** 1842 * cdns3_device_irq_handler - interrupt handler for device part of controller 1843 * 1844 * @irq: irq number for cdns3 core device 1845 * @data: structure of cdns3 1846 * 1847 * Returns IRQ_HANDLED or IRQ_NONE 1848 */ 1849 static irqreturn_t cdns3_device_irq_handler(int irq, void *data) 1850 { 1851 struct cdns3_device *priv_dev = data; 1852 struct cdns *cdns = dev_get_drvdata(priv_dev->dev); 1853 irqreturn_t ret = IRQ_NONE; 1854 u32 reg; 1855 1856 if (cdns->in_lpm) 1857 return ret; 1858 1859 /* check USB device interrupt */ 1860 reg = readl(&priv_dev->regs->usb_ists); 1861 if (reg) { 1862 /* After masking interrupts the new interrupts won't be 1863 * reported in usb_ists/ep_ists. In order to not lose some 1864 * of them driver disables only detected interrupts. 1865 * They will be enabled ASAP after clearing source of 1866 * interrupt. This an unusual behavior only applies to 1867 * usb_ists register. 1868 */ 1869 reg = ~reg & readl(&priv_dev->regs->usb_ien); 1870 /* mask deferred interrupt. */ 1871 writel(reg, &priv_dev->regs->usb_ien); 1872 ret = IRQ_WAKE_THREAD; 1873 } 1874 1875 /* check endpoint interrupt */ 1876 reg = readl(&priv_dev->regs->ep_ists); 1877 if (reg) { 1878 writel(0, &priv_dev->regs->ep_ien); 1879 ret = IRQ_WAKE_THREAD; 1880 } 1881 1882 return ret; 1883 } 1884 1885 /** 1886 * cdns3_device_thread_irq_handler - interrupt handler for device part 1887 * of controller 1888 * 1889 * @irq: irq number for cdns3 core device 1890 * @data: structure of cdns3 1891 * 1892 * Returns IRQ_HANDLED or IRQ_NONE 1893 */ 1894 static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) 1895 { 1896 struct cdns3_device *priv_dev = data; 1897 irqreturn_t ret = IRQ_NONE; 1898 unsigned long flags; 1899 unsigned int bit; 1900 unsigned long reg; 1901 1902 spin_lock_irqsave(&priv_dev->lock, flags); 1903 1904 reg = readl(&priv_dev->regs->usb_ists); 1905 if (reg) { 1906 writel(reg, &priv_dev->regs->usb_ists); 1907 writel(USB_IEN_INIT, &priv_dev->regs->usb_ien); 1908 cdns3_check_usb_interrupt_proceed(priv_dev, reg); 1909 ret = IRQ_HANDLED; 1910 } 1911 1912 reg = readl(&priv_dev->regs->ep_ists); 1913 1914 /* handle default endpoint OUT */ 1915 if (reg & EP_ISTS_EP_OUT0) { 1916 cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT); 1917 ret = IRQ_HANDLED; 1918 } 1919 1920 /* handle default endpoint IN */ 1921 if (reg & EP_ISTS_EP_IN0) { 1922 cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN); 1923 ret = IRQ_HANDLED; 1924 } 1925 1926 /* check if interrupt from non default endpoint, if no exit */ 1927 reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0); 1928 if (!reg) 1929 goto irqend; 1930 1931 for_each_set_bit(bit, ®, 1932 sizeof(u32) * BITS_PER_BYTE) { 1933 cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]); 1934 ret = IRQ_HANDLED; 1935 } 1936 1937 if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams) 1938 cdns3_wa2_check_outq_status(priv_dev); 1939 1940 irqend: 1941 writel(~0, &priv_dev->regs->ep_ien); 1942 spin_unlock_irqrestore(&priv_dev->lock, flags); 1943 1944 return ret; 1945 } 1946 1947 /** 1948 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP 1949 * 1950 * The real reservation will occur during write to EP_CFG register, 1951 * this function is used to check if the 'size' reservation is allowed. 1952 * 1953 * @priv_dev: extended gadget object 1954 * @size: the size (KB) for EP would like to allocate 1955 * @is_in: endpoint direction 1956 * 1957 * Return 0 if the required size can met or negative value on failure 1958 */ 1959 static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev, 1960 int size, int is_in) 1961 { 1962 int remained; 1963 1964 /* 2KB are reserved for EP0*/ 1965 remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2; 1966 1967 if (is_in) { 1968 if (remained < size) 1969 return -EPERM; 1970 1971 priv_dev->onchip_used_size += size; 1972 } else { 1973 int required; 1974 1975 /** 1976 * ALL OUT EPs are shared the same chunk onchip memory, so 1977 * driver checks if it already has assigned enough buffers 1978 */ 1979 if (priv_dev->out_mem_is_allocated >= size) 1980 return 0; 1981 1982 required = size - priv_dev->out_mem_is_allocated; 1983 1984 if (required > remained) 1985 return -EPERM; 1986 1987 priv_dev->out_mem_is_allocated += required; 1988 priv_dev->onchip_used_size += required; 1989 } 1990 1991 return 0; 1992 } 1993 1994 static void cdns3_configure_dmult(struct cdns3_device *priv_dev, 1995 struct cdns3_endpoint *priv_ep) 1996 { 1997 struct cdns3_usb_regs __iomem *regs = priv_dev->regs; 1998 1999 /* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */ 2000 if (priv_dev->dev_ver <= DEV_VER_V2) 2001 writel(USB_CONF_DMULT, ®s->usb_conf); 2002 2003 if (priv_dev->dev_ver == DEV_VER_V2) 2004 writel(USB_CONF2_EN_TDL_TRB, ®s->usb_conf2); 2005 2006 if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) { 2007 u32 mask; 2008 2009 if (priv_ep->dir) 2010 mask = BIT(priv_ep->num + 16); 2011 else 2012 mask = BIT(priv_ep->num); 2013 2014 if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) { 2015 cdns3_set_register_bit(®s->tdl_from_trb, mask); 2016 cdns3_set_register_bit(®s->tdl_beh, mask); 2017 cdns3_set_register_bit(®s->tdl_beh2, mask); 2018 cdns3_set_register_bit(®s->dma_adv_td, mask); 2019 } 2020 2021 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) 2022 cdns3_set_register_bit(®s->tdl_from_trb, mask); 2023 2024 cdns3_set_register_bit(®s->dtrans, mask); 2025 } 2026 } 2027 2028 /** 2029 * cdns3_ep_config - Configure hardware endpoint 2030 * @priv_ep: extended endpoint object 2031 * @enable: set EP_CFG_ENABLE bit in ep_cfg register. 2032 */ 2033 int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) 2034 { 2035 bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC); 2036 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2037 u32 bEndpointAddress = priv_ep->num | priv_ep->dir; 2038 u32 max_packet_size = 0; 2039 u8 maxburst = 0; 2040 u32 ep_cfg = 0; 2041 u8 buffering; 2042 u8 mult = 0; 2043 int ret; 2044 2045 buffering = priv_dev->ep_buf_size - 1; 2046 2047 cdns3_configure_dmult(priv_dev, priv_ep); 2048 2049 switch (priv_ep->type) { 2050 case USB_ENDPOINT_XFER_INT: 2051 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT); 2052 2053 if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir) 2054 ep_cfg |= EP_CFG_TDL_CHK; 2055 break; 2056 case USB_ENDPOINT_XFER_BULK: 2057 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK); 2058 2059 if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir) 2060 ep_cfg |= EP_CFG_TDL_CHK; 2061 break; 2062 default: 2063 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC); 2064 mult = priv_dev->ep_iso_burst - 1; 2065 buffering = mult + 1; 2066 } 2067 2068 switch (priv_dev->gadget.speed) { 2069 case USB_SPEED_FULL: 2070 max_packet_size = is_iso_ep ? 1023 : 64; 2071 break; 2072 case USB_SPEED_HIGH: 2073 max_packet_size = is_iso_ep ? 1024 : 512; 2074 break; 2075 case USB_SPEED_SUPER: 2076 /* It's limitation that driver assumes in driver. */ 2077 mult = 0; 2078 max_packet_size = 1024; 2079 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) { 2080 maxburst = priv_dev->ep_iso_burst - 1; 2081 buffering = (mult + 1) * 2082 (maxburst + 1); 2083 2084 if (priv_ep->interval > 1) 2085 buffering++; 2086 } else { 2087 maxburst = priv_dev->ep_buf_size - 1; 2088 } 2089 break; 2090 default: 2091 /* all other speed are not supported */ 2092 return -EINVAL; 2093 } 2094 2095 if (max_packet_size == 1024) 2096 priv_ep->trb_burst_size = 128; 2097 else if (max_packet_size >= 512) 2098 priv_ep->trb_burst_size = 64; 2099 else 2100 priv_ep->trb_burst_size = 16; 2101 2102 /* 2103 * In versions preceding DEV_VER_V2, for example, iMX8QM, there exit the bugs 2104 * in the DMA. These bugs occur when the trb_burst_size exceeds 16 and the 2105 * address is not aligned to 128 Bytes (which is a product of the 64-bit AXI 2106 * and AXI maximum burst length of 16 or 0xF+1, dma_axi_ctrl0[3:0]). This 2107 * results in data corruption when it crosses the 4K border. The corruption 2108 * specifically occurs from the position (4K - (address & 0x7F)) to 4K. 2109 * 2110 * So force trb_burst_size to 16 at such platform. 2111 */ 2112 if (priv_dev->dev_ver < DEV_VER_V2) 2113 priv_ep->trb_burst_size = 16; 2114 2115 mult = min_t(u8, mult, EP_CFG_MULT_MAX); 2116 buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX); 2117 maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX); 2118 2119 /* onchip buffer is only allocated before configuration */ 2120 if (!priv_dev->hw_configured_flag) { 2121 ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1, 2122 !!priv_ep->dir); 2123 if (ret) { 2124 dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n"); 2125 return ret; 2126 } 2127 } 2128 2129 if (enable) 2130 ep_cfg |= EP_CFG_ENABLE; 2131 2132 if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) { 2133 if (priv_dev->dev_ver >= DEV_VER_V3) { 2134 u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0)); 2135 2136 /* 2137 * Stream capable endpoints are handled by using ep_tdl 2138 * register. Other endpoints use TDL from TRB feature. 2139 */ 2140 cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, 2141 mask); 2142 } 2143 2144 /* Enable Stream Bit TDL chk and SID chk */ 2145 ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK; 2146 } 2147 2148 ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) | 2149 EP_CFG_MULT(mult) | 2150 EP_CFG_BUFFERING(buffering) | 2151 EP_CFG_MAXBURST(maxburst); 2152 2153 cdns3_select_ep(priv_dev, bEndpointAddress); 2154 writel(ep_cfg, &priv_dev->regs->ep_cfg); 2155 priv_ep->flags |= EP_CONFIGURED; 2156 2157 dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n", 2158 priv_ep->name, ep_cfg); 2159 2160 return 0; 2161 } 2162 2163 /* Find correct direction for HW endpoint according to description */ 2164 static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc, 2165 struct cdns3_endpoint *priv_ep) 2166 { 2167 return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) || 2168 (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc)); 2169 } 2170 2171 static struct 2172 cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev, 2173 struct usb_endpoint_descriptor *desc) 2174 { 2175 struct usb_ep *ep; 2176 struct cdns3_endpoint *priv_ep; 2177 2178 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { 2179 unsigned long num; 2180 int ret; 2181 /* ep name pattern likes epXin or epXout */ 2182 char c[2] = {ep->name[2], '\0'}; 2183 2184 ret = kstrtoul(c, 10, &num); 2185 if (ret) 2186 return ERR_PTR(ret); 2187 2188 priv_ep = ep_to_cdns3_ep(ep); 2189 if (cdns3_ep_dir_is_correct(desc, priv_ep)) { 2190 if (!(priv_ep->flags & EP_CLAIMED)) { 2191 priv_ep->num = num; 2192 return priv_ep; 2193 } 2194 } 2195 } 2196 2197 return ERR_PTR(-ENOENT); 2198 } 2199 2200 /* 2201 * Cadence IP has one limitation that all endpoints must be configured 2202 * (Type & MaxPacketSize) before setting configuration through hardware 2203 * register, it means we can't change endpoints configuration after 2204 * set_configuration. 2205 * 2206 * This function set EP_CLAIMED flag which is added when the gadget driver 2207 * uses usb_ep_autoconfig to configure specific endpoint; 2208 * When the udc driver receives set_configurion request, 2209 * it goes through all claimed endpoints, and configure all endpoints 2210 * accordingly. 2211 * 2212 * At usb_ep_ops.enable/disable, we only enable and disable endpoint through 2213 * ep_cfg register which can be changed after set_configuration, and do 2214 * some software operation accordingly. 2215 */ 2216 static struct 2217 usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget, 2218 struct usb_endpoint_descriptor *desc, 2219 struct usb_ss_ep_comp_descriptor *comp_desc) 2220 { 2221 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2222 struct cdns3_endpoint *priv_ep; 2223 unsigned long flags; 2224 2225 priv_ep = cdns3_find_available_ep(priv_dev, desc); 2226 if (IS_ERR(priv_ep)) { 2227 dev_err(priv_dev->dev, "no available ep\n"); 2228 return NULL; 2229 } 2230 2231 dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name); 2232 2233 spin_lock_irqsave(&priv_dev->lock, flags); 2234 priv_ep->endpoint.desc = desc; 2235 priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT; 2236 priv_ep->type = usb_endpoint_type(desc); 2237 priv_ep->flags |= EP_CLAIMED; 2238 priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0; 2239 2240 spin_unlock_irqrestore(&priv_dev->lock, flags); 2241 return &priv_ep->endpoint; 2242 } 2243 2244 /** 2245 * cdns3_gadget_ep_alloc_request - Allocates request 2246 * @ep: endpoint object associated with request 2247 * @gfp_flags: gfp flags 2248 * 2249 * Returns allocated request address, NULL on allocation error 2250 */ 2251 struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep, 2252 gfp_t gfp_flags) 2253 { 2254 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2255 struct cdns3_request *priv_req; 2256 2257 priv_req = kzalloc(sizeof(*priv_req), gfp_flags); 2258 if (!priv_req) 2259 return NULL; 2260 2261 priv_req->priv_ep = priv_ep; 2262 2263 trace_cdns3_alloc_request(priv_req); 2264 return &priv_req->request; 2265 } 2266 2267 /** 2268 * cdns3_gadget_ep_free_request - Free memory occupied by request 2269 * @ep: endpoint object associated with request 2270 * @request: request to free memory 2271 */ 2272 void cdns3_gadget_ep_free_request(struct usb_ep *ep, 2273 struct usb_request *request) 2274 { 2275 struct cdns3_request *priv_req = to_cdns3_request(request); 2276 2277 if (priv_req->aligned_buf) 2278 priv_req->aligned_buf->in_use = 0; 2279 2280 trace_cdns3_free_request(priv_req); 2281 kfree(priv_req); 2282 } 2283 2284 /** 2285 * cdns3_gadget_ep_enable - Enable endpoint 2286 * @ep: endpoint object 2287 * @desc: endpoint descriptor 2288 * 2289 * Returns 0 on success, error code elsewhere 2290 */ 2291 static int cdns3_gadget_ep_enable(struct usb_ep *ep, 2292 const struct usb_endpoint_descriptor *desc) 2293 { 2294 struct cdns3_endpoint *priv_ep; 2295 struct cdns3_device *priv_dev; 2296 const struct usb_ss_ep_comp_descriptor *comp_desc; 2297 u32 reg = EP_STS_EN_TRBERREN; 2298 u32 bEndpointAddress; 2299 unsigned long flags; 2300 int enable = 1; 2301 int ret = 0; 2302 int val; 2303 2304 if (!ep) { 2305 pr_debug("usbss: ep not configured?\n"); 2306 return -EINVAL; 2307 } 2308 2309 priv_ep = ep_to_cdns3_ep(ep); 2310 priv_dev = priv_ep->cdns3_dev; 2311 comp_desc = priv_ep->endpoint.comp_desc; 2312 2313 if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 2314 dev_dbg(priv_dev->dev, "usbss: invalid parameters\n"); 2315 return -EINVAL; 2316 } 2317 2318 if (!desc->wMaxPacketSize) { 2319 dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n"); 2320 return -EINVAL; 2321 } 2322 2323 if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED, 2324 "%s is already enabled\n", priv_ep->name)) 2325 return 0; 2326 2327 spin_lock_irqsave(&priv_dev->lock, flags); 2328 2329 priv_ep->endpoint.desc = desc; 2330 priv_ep->type = usb_endpoint_type(desc); 2331 priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0; 2332 2333 if (priv_ep->interval > ISO_MAX_INTERVAL && 2334 priv_ep->type == USB_ENDPOINT_XFER_ISOC) { 2335 dev_err(priv_dev->dev, "Driver is limited to %d period\n", 2336 ISO_MAX_INTERVAL); 2337 2338 ret = -EINVAL; 2339 goto exit; 2340 } 2341 2342 bEndpointAddress = priv_ep->num | priv_ep->dir; 2343 cdns3_select_ep(priv_dev, bEndpointAddress); 2344 2345 /* 2346 * For some versions of controller at some point during ISO OUT traffic 2347 * DMA reads Transfer Ring for the EP which has never got doorbell. 2348 * This issue was detected only on simulation, but to avoid this issue 2349 * driver add protection against it. To fix it driver enable ISO OUT 2350 * endpoint before setting DRBL. This special treatment of ISO OUT 2351 * endpoints are recommended by controller specification. 2352 */ 2353 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) 2354 enable = 0; 2355 2356 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 2357 /* 2358 * Enable stream support (SS mode) related interrupts 2359 * in EP_STS_EN Register 2360 */ 2361 if (priv_dev->gadget.speed >= USB_SPEED_SUPER) { 2362 reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN | 2363 EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN | 2364 EP_STS_EN_STREAMREN; 2365 priv_ep->use_streams = true; 2366 ret = cdns3_ep_config(priv_ep, enable); 2367 priv_dev->using_streams |= true; 2368 } 2369 } else { 2370 ret = cdns3_ep_config(priv_ep, enable); 2371 } 2372 2373 if (ret) 2374 goto exit; 2375 2376 ret = cdns3_allocate_trb_pool(priv_ep); 2377 if (ret) 2378 goto exit; 2379 2380 bEndpointAddress = priv_ep->num | priv_ep->dir; 2381 cdns3_select_ep(priv_dev, bEndpointAddress); 2382 2383 trace_cdns3_gadget_ep_enable(priv_ep); 2384 2385 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2386 2387 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2388 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)), 2389 1, 1000); 2390 2391 if (unlikely(ret)) { 2392 cdns3_free_trb_pool(priv_ep); 2393 ret = -EINVAL; 2394 goto exit; 2395 } 2396 2397 /* enable interrupt for selected endpoint */ 2398 cdns3_set_register_bit(&priv_dev->regs->ep_ien, 2399 BIT(cdns3_ep_addr_to_index(bEndpointAddress))); 2400 2401 if (priv_dev->dev_ver < DEV_VER_V2) 2402 cdns3_wa2_enable_detection(priv_dev, priv_ep, reg); 2403 2404 writel(reg, &priv_dev->regs->ep_sts_en); 2405 2406 ep->desc = desc; 2407 priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING | 2408 EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN); 2409 priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR; 2410 priv_ep->wa1_set = 0; 2411 priv_ep->enqueue = 0; 2412 priv_ep->dequeue = 0; 2413 reg = readl(&priv_dev->regs->ep_sts); 2414 priv_ep->pcs = !!EP_STS_CCS(reg); 2415 priv_ep->ccs = !!EP_STS_CCS(reg); 2416 /* one TRB is reserved for link TRB used in DMULT mode*/ 2417 priv_ep->free_trbs = priv_ep->num_trbs - 1; 2418 exit: 2419 spin_unlock_irqrestore(&priv_dev->lock, flags); 2420 2421 return ret; 2422 } 2423 2424 /** 2425 * cdns3_gadget_ep_disable - Disable endpoint 2426 * @ep: endpoint object 2427 * 2428 * Returns 0 on success, error code elsewhere 2429 */ 2430 static int cdns3_gadget_ep_disable(struct usb_ep *ep) 2431 { 2432 struct cdns3_endpoint *priv_ep; 2433 struct cdns3_request *priv_req; 2434 struct cdns3_device *priv_dev; 2435 struct usb_request *request; 2436 unsigned long flags; 2437 int ret = 0; 2438 u32 ep_cfg; 2439 int val; 2440 2441 if (!ep) { 2442 pr_err("usbss: invalid parameters\n"); 2443 return -EINVAL; 2444 } 2445 2446 priv_ep = ep_to_cdns3_ep(ep); 2447 priv_dev = priv_ep->cdns3_dev; 2448 2449 if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED), 2450 "%s is already disabled\n", priv_ep->name)) 2451 return 0; 2452 2453 spin_lock_irqsave(&priv_dev->lock, flags); 2454 2455 trace_cdns3_gadget_ep_disable(priv_ep); 2456 2457 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2458 2459 ep_cfg = readl(&priv_dev->regs->ep_cfg); 2460 ep_cfg &= ~EP_CFG_ENABLE; 2461 writel(ep_cfg, &priv_dev->regs->ep_cfg); 2462 2463 /** 2464 * Driver needs some time before resetting endpoint. 2465 * It need waits for clearing DBUSY bit or for timeout expired. 2466 * 10us is enough time for controller to stop transfer. 2467 */ 2468 readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val, 2469 !(val & EP_STS_DBUSY), 1, 10); 2470 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2471 2472 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2473 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)), 2474 1, 1000); 2475 if (unlikely(ret)) 2476 dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n", 2477 priv_ep->name); 2478 2479 while (!list_empty(&priv_ep->pending_req_list)) { 2480 request = cdns3_next_request(&priv_ep->pending_req_list); 2481 2482 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 2483 -ESHUTDOWN); 2484 } 2485 2486 while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { 2487 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); 2488 2489 kfree(priv_req->request.buf); 2490 cdns3_gadget_ep_free_request(&priv_ep->endpoint, 2491 &priv_req->request); 2492 list_del_init(&priv_req->list); 2493 --priv_ep->wa2_counter; 2494 } 2495 2496 while (!list_empty(&priv_ep->deferred_req_list)) { 2497 request = cdns3_next_request(&priv_ep->deferred_req_list); 2498 2499 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 2500 -ESHUTDOWN); 2501 } 2502 2503 priv_ep->descmis_req = NULL; 2504 2505 ep->desc = NULL; 2506 priv_ep->flags &= ~EP_ENABLED; 2507 priv_ep->use_streams = false; 2508 2509 spin_unlock_irqrestore(&priv_dev->lock, flags); 2510 2511 return ret; 2512 } 2513 2514 /** 2515 * __cdns3_gadget_ep_queue - Transfer data on endpoint 2516 * @ep: endpoint object 2517 * @request: request object 2518 * @gfp_flags: gfp flags 2519 * 2520 * Returns 0 on success, error code elsewhere 2521 */ 2522 static int __cdns3_gadget_ep_queue(struct usb_ep *ep, 2523 struct usb_request *request, 2524 gfp_t gfp_flags) 2525 { 2526 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2527 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2528 struct cdns3_request *priv_req; 2529 int ret = 0; 2530 2531 request->actual = 0; 2532 request->status = -EINPROGRESS; 2533 priv_req = to_cdns3_request(request); 2534 trace_cdns3_ep_queue(priv_req); 2535 2536 if (priv_dev->dev_ver < DEV_VER_V2) { 2537 ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep, 2538 priv_req); 2539 2540 if (ret == EINPROGRESS) 2541 return 0; 2542 } 2543 2544 ret = cdns3_prepare_aligned_request_buf(priv_req); 2545 if (ret < 0) 2546 return ret; 2547 2548 if (likely(!(priv_req->flags & REQUEST_UNALIGNED))) { 2549 ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request, 2550 usb_endpoint_dir_in(ep->desc)); 2551 if (ret) 2552 return ret; 2553 } 2554 2555 list_add_tail(&request->list, &priv_ep->deferred_req_list); 2556 2557 /* 2558 * For stream capable endpoint if prime irq flag is set then only start 2559 * request. 2560 * If hardware endpoint configuration has not been set yet then 2561 * just queue request in deferred list. Transfer will be started in 2562 * cdns3_set_hw_configuration. 2563 */ 2564 if (!request->stream_id) { 2565 if (priv_dev->hw_configured_flag && 2566 !(priv_ep->flags & EP_STALLED) && 2567 !(priv_ep->flags & EP_STALL_PENDING)) 2568 cdns3_start_all_request(priv_dev, priv_ep); 2569 } else { 2570 if (priv_dev->hw_configured_flag && priv_ep->prime_flag) 2571 cdns3_start_all_request(priv_dev, priv_ep); 2572 } 2573 2574 return 0; 2575 } 2576 2577 static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 2578 gfp_t gfp_flags) 2579 { 2580 struct usb_request *zlp_request; 2581 struct cdns3_endpoint *priv_ep; 2582 struct cdns3_device *priv_dev; 2583 unsigned long flags; 2584 int ret; 2585 2586 if (!request || !ep) 2587 return -EINVAL; 2588 2589 priv_ep = ep_to_cdns3_ep(ep); 2590 priv_dev = priv_ep->cdns3_dev; 2591 2592 spin_lock_irqsave(&priv_dev->lock, flags); 2593 2594 ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags); 2595 2596 if (ret == 0 && request->zero && request->length && 2597 (request->length % ep->maxpacket == 0)) { 2598 struct cdns3_request *priv_req; 2599 2600 zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC); 2601 zlp_request->buf = priv_dev->zlp_buf; 2602 zlp_request->length = 0; 2603 2604 priv_req = to_cdns3_request(zlp_request); 2605 priv_req->flags |= REQUEST_ZLP; 2606 2607 dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n", 2608 priv_ep->name); 2609 ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags); 2610 } 2611 2612 spin_unlock_irqrestore(&priv_dev->lock, flags); 2613 return ret; 2614 } 2615 2616 /** 2617 * cdns3_gadget_ep_dequeue - Remove request from transfer queue 2618 * @ep: endpoint object associated with request 2619 * @request: request object 2620 * 2621 * Returns 0 on success, error code elsewhere 2622 */ 2623 int cdns3_gadget_ep_dequeue(struct usb_ep *ep, 2624 struct usb_request *request) 2625 { 2626 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2627 struct cdns3_device *priv_dev; 2628 struct usb_request *req, *req_temp; 2629 struct cdns3_request *priv_req; 2630 struct cdns3_trb *link_trb; 2631 u8 req_on_hw_ring = 0; 2632 unsigned long flags; 2633 int ret = 0; 2634 int val; 2635 2636 if (!ep || !request || !ep->desc) 2637 return -EINVAL; 2638 2639 priv_dev = priv_ep->cdns3_dev; 2640 2641 spin_lock_irqsave(&priv_dev->lock, flags); 2642 2643 priv_req = to_cdns3_request(request); 2644 2645 trace_cdns3_ep_dequeue(priv_req); 2646 2647 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2648 2649 list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list, 2650 list) { 2651 if (request == req) { 2652 req_on_hw_ring = 1; 2653 goto found; 2654 } 2655 } 2656 2657 list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list, 2658 list) { 2659 if (request == req) 2660 goto found; 2661 } 2662 2663 goto not_found; 2664 2665 found: 2666 link_trb = priv_req->trb; 2667 2668 /* Update ring only if removed request is on pending_req_list list */ 2669 if (req_on_hw_ring && link_trb) { 2670 /* Stop DMA */ 2671 writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd); 2672 2673 /* wait for DFLUSH cleared */ 2674 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2675 !(val & EP_CMD_DFLUSH), 1, 1000); 2676 2677 link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma + 2678 ((priv_req->end_trb + 1) * TRB_SIZE))); 2679 link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) | 2680 TRB_TYPE(TRB_LINK) | TRB_CHAIN); 2681 2682 if (priv_ep->wa1_trb == priv_req->trb) 2683 cdns3_wa1_restore_cycle_bit(priv_ep); 2684 } 2685 2686 cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET); 2687 2688 req = cdns3_next_request(&priv_ep->pending_req_list); 2689 if (req) 2690 cdns3_rearm_transfer(priv_ep, 1); 2691 2692 not_found: 2693 spin_unlock_irqrestore(&priv_dev->lock, flags); 2694 return ret; 2695 } 2696 2697 /** 2698 * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint 2699 * Should be called after acquiring spin_lock and selecting ep 2700 * @priv_ep: endpoint object to set stall on. 2701 */ 2702 void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep) 2703 { 2704 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2705 2706 trace_cdns3_halt(priv_ep, 1, 0); 2707 2708 if (!(priv_ep->flags & EP_STALLED)) { 2709 u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts); 2710 2711 if (!(ep_sts_reg & EP_STS_DBUSY)) 2712 cdns3_ep_stall_flush(priv_ep); 2713 else 2714 priv_ep->flags |= EP_STALL_PENDING; 2715 } 2716 } 2717 2718 /** 2719 * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint 2720 * Should be called after acquiring spin_lock and selecting ep 2721 * @priv_ep: endpoint object to clear stall on 2722 */ 2723 int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) 2724 { 2725 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2726 struct usb_request *request; 2727 struct cdns3_request *priv_req; 2728 struct cdns3_trb *trb = NULL; 2729 struct cdns3_trb trb_tmp; 2730 int ret; 2731 int val; 2732 2733 trace_cdns3_halt(priv_ep, 0, 0); 2734 2735 request = cdns3_next_request(&priv_ep->pending_req_list); 2736 if (request) { 2737 priv_req = to_cdns3_request(request); 2738 trb = priv_req->trb; 2739 if (trb) { 2740 trb_tmp = *trb; 2741 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); 2742 } 2743 } 2744 2745 writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2746 2747 /* wait for EPRST cleared */ 2748 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2749 !(val & EP_CMD_EPRST), 1, 100); 2750 if (ret) 2751 return -EINVAL; 2752 2753 priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); 2754 2755 if (request) { 2756 if (trb) 2757 *trb = trb_tmp; 2758 2759 cdns3_rearm_transfer(priv_ep, 1); 2760 } 2761 2762 cdns3_start_all_request(priv_dev, priv_ep); 2763 return ret; 2764 } 2765 2766 /** 2767 * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint 2768 * @ep: endpoint object to set/clear stall on 2769 * @value: 1 for set stall, 0 for clear stall 2770 * 2771 * Returns 0 on success, error code elsewhere 2772 */ 2773 int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value) 2774 { 2775 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2776 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2777 unsigned long flags; 2778 int ret = 0; 2779 2780 if (!(priv_ep->flags & EP_ENABLED)) 2781 return -EPERM; 2782 2783 spin_lock_irqsave(&priv_dev->lock, flags); 2784 2785 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2786 2787 if (!value) { 2788 priv_ep->flags &= ~EP_WEDGE; 2789 ret = __cdns3_gadget_ep_clear_halt(priv_ep); 2790 } else { 2791 __cdns3_gadget_ep_set_halt(priv_ep); 2792 } 2793 2794 spin_unlock_irqrestore(&priv_dev->lock, flags); 2795 2796 return ret; 2797 } 2798 2799 extern const struct usb_ep_ops cdns3_gadget_ep0_ops; 2800 2801 static const struct usb_ep_ops cdns3_gadget_ep_ops = { 2802 .enable = cdns3_gadget_ep_enable, 2803 .disable = cdns3_gadget_ep_disable, 2804 .alloc_request = cdns3_gadget_ep_alloc_request, 2805 .free_request = cdns3_gadget_ep_free_request, 2806 .queue = cdns3_gadget_ep_queue, 2807 .dequeue = cdns3_gadget_ep_dequeue, 2808 .set_halt = cdns3_gadget_ep_set_halt, 2809 .set_wedge = cdns3_gadget_ep_set_wedge, 2810 }; 2811 2812 /** 2813 * cdns3_gadget_get_frame - Returns number of actual ITP frame 2814 * @gadget: gadget object 2815 * 2816 * Returns number of actual ITP frame 2817 */ 2818 static int cdns3_gadget_get_frame(struct usb_gadget *gadget) 2819 { 2820 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2821 2822 return readl(&priv_dev->regs->usb_itpn); 2823 } 2824 2825 int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev) 2826 { 2827 enum usb_device_speed speed; 2828 2829 speed = cdns3_get_speed(priv_dev); 2830 2831 if (speed >= USB_SPEED_SUPER) 2832 return 0; 2833 2834 /* Start driving resume signaling to indicate remote wakeup. */ 2835 writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf); 2836 2837 return 0; 2838 } 2839 2840 static int cdns3_gadget_wakeup(struct usb_gadget *gadget) 2841 { 2842 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2843 unsigned long flags; 2844 int ret = 0; 2845 2846 spin_lock_irqsave(&priv_dev->lock, flags); 2847 ret = __cdns3_gadget_wakeup(priv_dev); 2848 spin_unlock_irqrestore(&priv_dev->lock, flags); 2849 return ret; 2850 } 2851 2852 static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget, 2853 int is_selfpowered) 2854 { 2855 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2856 unsigned long flags; 2857 2858 spin_lock_irqsave(&priv_dev->lock, flags); 2859 priv_dev->is_selfpowered = !!is_selfpowered; 2860 spin_unlock_irqrestore(&priv_dev->lock, flags); 2861 return 0; 2862 } 2863 2864 static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on) 2865 { 2866 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2867 2868 if (is_on) { 2869 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf); 2870 } else { 2871 writel(~0, &priv_dev->regs->ep_ists); 2872 writel(~0, &priv_dev->regs->usb_ists); 2873 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); 2874 } 2875 2876 return 0; 2877 } 2878 2879 static void cdns3_gadget_config(struct cdns3_device *priv_dev) 2880 { 2881 struct cdns3_usb_regs __iomem *regs = priv_dev->regs; 2882 u32 reg; 2883 2884 cdns3_ep0_config(priv_dev); 2885 2886 /* enable interrupts for endpoint 0 (in and out) */ 2887 writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, ®s->ep_ien); 2888 2889 /* 2890 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1 2891 * revision of controller. 2892 */ 2893 if (priv_dev->dev_ver == DEV_VER_TI_V1) { 2894 reg = readl(®s->dbg_link1); 2895 2896 reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK; 2897 reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) | 2898 DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET; 2899 writel(reg, ®s->dbg_link1); 2900 } 2901 2902 /* 2903 * By default some platforms has set protected access to memory. 2904 * This cause problem with cache, so driver restore non-secure 2905 * access to memory. 2906 */ 2907 reg = readl(®s->dma_axi_ctrl); 2908 reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) | 2909 DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE); 2910 writel(reg, ®s->dma_axi_ctrl); 2911 2912 /* enable generic interrupt*/ 2913 writel(USB_IEN_INIT, ®s->usb_ien); 2914 writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, ®s->usb_conf); 2915 /* keep Fast Access bit */ 2916 writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr); 2917 2918 cdns3_configure_dmult(priv_dev, NULL); 2919 } 2920 2921 /** 2922 * cdns3_gadget_udc_start - Gadget start 2923 * @gadget: gadget object 2924 * @driver: driver which operates on this gadget 2925 * 2926 * Returns 0 on success, error code elsewhere 2927 */ 2928 static int cdns3_gadget_udc_start(struct usb_gadget *gadget, 2929 struct usb_gadget_driver *driver) 2930 { 2931 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2932 unsigned long flags; 2933 enum usb_device_speed max_speed = driver->max_speed; 2934 2935 spin_lock_irqsave(&priv_dev->lock, flags); 2936 priv_dev->gadget_driver = driver; 2937 2938 /* limit speed if necessary */ 2939 max_speed = min(driver->max_speed, gadget->max_speed); 2940 2941 switch (max_speed) { 2942 case USB_SPEED_FULL: 2943 writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf); 2944 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); 2945 break; 2946 case USB_SPEED_HIGH: 2947 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); 2948 break; 2949 case USB_SPEED_SUPER: 2950 break; 2951 default: 2952 dev_err(priv_dev->dev, 2953 "invalid maximum_speed parameter %d\n", 2954 max_speed); 2955 fallthrough; 2956 case USB_SPEED_UNKNOWN: 2957 /* default to superspeed */ 2958 max_speed = USB_SPEED_SUPER; 2959 break; 2960 } 2961 2962 cdns3_gadget_config(priv_dev); 2963 spin_unlock_irqrestore(&priv_dev->lock, flags); 2964 return 0; 2965 } 2966 2967 /** 2968 * cdns3_gadget_udc_stop - Stops gadget 2969 * @gadget: gadget object 2970 * 2971 * Returns 0 2972 */ 2973 static int cdns3_gadget_udc_stop(struct usb_gadget *gadget) 2974 { 2975 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2976 struct cdns3_endpoint *priv_ep; 2977 u32 bEndpointAddress; 2978 struct usb_ep *ep; 2979 int val; 2980 2981 priv_dev->gadget_driver = NULL; 2982 2983 priv_dev->onchip_used_size = 0; 2984 priv_dev->out_mem_is_allocated = 0; 2985 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 2986 2987 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { 2988 priv_ep = ep_to_cdns3_ep(ep); 2989 bEndpointAddress = priv_ep->num | priv_ep->dir; 2990 cdns3_select_ep(priv_dev, bEndpointAddress); 2991 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2992 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2993 !(val & EP_CMD_EPRST), 1, 100); 2994 2995 priv_ep->flags &= ~EP_CLAIMED; 2996 } 2997 2998 /* disable interrupt for device */ 2999 writel(0, &priv_dev->regs->usb_ien); 3000 writel(0, &priv_dev->regs->usb_pwr); 3001 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); 3002 3003 return 0; 3004 } 3005 3006 /** 3007 * cdns3_gadget_check_config - ensure cdns3 can support the USB configuration 3008 * @gadget: pointer to the USB gadget 3009 * 3010 * Used to record the maximum number of endpoints being used in a USB composite 3011 * device. (across all configurations) This is to be used in the calculation 3012 * of the TXFIFO sizes when resizing internal memory for individual endpoints. 3013 * It will help ensured that the resizing logic reserves enough space for at 3014 * least one max packet. 3015 */ 3016 static int cdns3_gadget_check_config(struct usb_gadget *gadget) 3017 { 3018 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 3019 struct cdns3_endpoint *priv_ep; 3020 struct usb_ep *ep; 3021 int n_in = 0; 3022 int total; 3023 3024 list_for_each_entry(ep, &gadget->ep_list, ep_list) { 3025 priv_ep = ep_to_cdns3_ep(ep); 3026 if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN)) 3027 n_in++; 3028 } 3029 3030 /* 2KB are reserved for EP0, 1KB for out*/ 3031 total = 2 + n_in + 1; 3032 3033 if (total > priv_dev->onchip_buffers) 3034 return -ENOMEM; 3035 3036 priv_dev->ep_buf_size = priv_dev->ep_iso_burst = 3037 (priv_dev->onchip_buffers - 2) / (n_in + 1); 3038 3039 return 0; 3040 } 3041 3042 static const struct usb_gadget_ops cdns3_gadget_ops = { 3043 .get_frame = cdns3_gadget_get_frame, 3044 .wakeup = cdns3_gadget_wakeup, 3045 .set_selfpowered = cdns3_gadget_set_selfpowered, 3046 .pullup = cdns3_gadget_pullup, 3047 .udc_start = cdns3_gadget_udc_start, 3048 .udc_stop = cdns3_gadget_udc_stop, 3049 .match_ep = cdns3_gadget_match_ep, 3050 .check_config = cdns3_gadget_check_config, 3051 }; 3052 3053 static void cdns3_free_all_eps(struct cdns3_device *priv_dev) 3054 { 3055 int i; 3056 3057 /* ep0 OUT point to ep0 IN. */ 3058 priv_dev->eps[16] = NULL; 3059 3060 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) 3061 if (priv_dev->eps[i]) { 3062 cdns3_free_trb_pool(priv_dev->eps[i]); 3063 devm_kfree(priv_dev->dev, priv_dev->eps[i]); 3064 } 3065 } 3066 3067 /** 3068 * cdns3_init_eps - Initializes software endpoints of gadget 3069 * @priv_dev: extended gadget object 3070 * 3071 * Returns 0 on success, error code elsewhere 3072 */ 3073 static int cdns3_init_eps(struct cdns3_device *priv_dev) 3074 { 3075 u32 ep_enabled_reg, iso_ep_reg; 3076 struct cdns3_endpoint *priv_ep; 3077 int ep_dir, ep_number; 3078 u32 ep_mask; 3079 int ret = 0; 3080 int i; 3081 3082 /* Read it from USB_CAP3 to USB_CAP5 */ 3083 ep_enabled_reg = readl(&priv_dev->regs->usb_cap3); 3084 iso_ep_reg = readl(&priv_dev->regs->usb_cap4); 3085 3086 dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n"); 3087 3088 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) { 3089 ep_dir = i >> 4; /* i div 16 */ 3090 ep_number = i & 0xF; /* i % 16 */ 3091 ep_mask = BIT(i); 3092 3093 if (!(ep_enabled_reg & ep_mask)) 3094 continue; 3095 3096 if (ep_dir && !ep_number) { 3097 priv_dev->eps[i] = priv_dev->eps[0]; 3098 continue; 3099 } 3100 3101 priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep), 3102 GFP_KERNEL); 3103 if (!priv_ep) 3104 goto err; 3105 3106 /* set parent of endpoint object */ 3107 priv_ep->cdns3_dev = priv_dev; 3108 priv_dev->eps[i] = priv_ep; 3109 priv_ep->num = ep_number; 3110 priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT; 3111 3112 if (!ep_number) { 3113 ret = cdns3_init_ep0(priv_dev, priv_ep); 3114 if (ret) { 3115 dev_err(priv_dev->dev, "Failed to init ep0\n"); 3116 goto err; 3117 } 3118 } else { 3119 snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s", 3120 ep_number, !!ep_dir ? "in" : "out"); 3121 priv_ep->endpoint.name = priv_ep->name; 3122 3123 usb_ep_set_maxpacket_limit(&priv_ep->endpoint, 3124 CDNS3_EP_MAX_PACKET_LIMIT); 3125 priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS; 3126 priv_ep->endpoint.ops = &cdns3_gadget_ep_ops; 3127 if (ep_dir) 3128 priv_ep->endpoint.caps.dir_in = 1; 3129 else 3130 priv_ep->endpoint.caps.dir_out = 1; 3131 3132 if (iso_ep_reg & ep_mask) 3133 priv_ep->endpoint.caps.type_iso = 1; 3134 3135 priv_ep->endpoint.caps.type_bulk = 1; 3136 priv_ep->endpoint.caps.type_int = 1; 3137 3138 list_add_tail(&priv_ep->endpoint.ep_list, 3139 &priv_dev->gadget.ep_list); 3140 } 3141 3142 priv_ep->flags = 0; 3143 3144 dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n", 3145 priv_ep->name, 3146 priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "", 3147 priv_ep->endpoint.caps.type_iso ? "ISO" : ""); 3148 3149 INIT_LIST_HEAD(&priv_ep->pending_req_list); 3150 INIT_LIST_HEAD(&priv_ep->deferred_req_list); 3151 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); 3152 } 3153 3154 return 0; 3155 err: 3156 cdns3_free_all_eps(priv_dev); 3157 return -ENOMEM; 3158 } 3159 3160 static void cdns3_gadget_release(struct device *dev) 3161 { 3162 struct cdns3_device *priv_dev = container_of(dev, 3163 struct cdns3_device, gadget.dev); 3164 3165 kfree(priv_dev); 3166 } 3167 3168 static void cdns3_gadget_exit(struct cdns *cdns) 3169 { 3170 struct cdns3_device *priv_dev; 3171 3172 priv_dev = cdns->gadget_dev; 3173 3174 3175 pm_runtime_mark_last_busy(cdns->dev); 3176 pm_runtime_put_autosuspend(cdns->dev); 3177 3178 usb_del_gadget(&priv_dev->gadget); 3179 devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); 3180 3181 cdns3_free_all_eps(priv_dev); 3182 3183 while (!list_empty(&priv_dev->aligned_buf_list)) { 3184 struct cdns3_aligned_buf *buf; 3185 3186 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); 3187 dma_free_noncoherent(priv_dev->sysdev, buf->size, 3188 buf->buf, 3189 buf->dma, 3190 buf->dir); 3191 3192 list_del(&buf->list); 3193 kfree(buf); 3194 } 3195 3196 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3197 priv_dev->setup_dma); 3198 dma_pool_destroy(priv_dev->eps_dma_pool); 3199 3200 kfree(priv_dev->zlp_buf); 3201 usb_put_gadget(&priv_dev->gadget); 3202 cdns->gadget_dev = NULL; 3203 cdns_drd_gadget_off(cdns); 3204 } 3205 3206 static int cdns3_gadget_start(struct cdns *cdns) 3207 { 3208 struct cdns3_device *priv_dev; 3209 u32 max_speed; 3210 int ret; 3211 3212 priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); 3213 if (!priv_dev) 3214 return -ENOMEM; 3215 3216 usb_initialize_gadget(cdns->dev, &priv_dev->gadget, 3217 cdns3_gadget_release); 3218 cdns->gadget_dev = priv_dev; 3219 priv_dev->sysdev = cdns->dev; 3220 priv_dev->dev = cdns->dev; 3221 priv_dev->regs = cdns->dev_regs; 3222 3223 device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size", 3224 &priv_dev->onchip_buffers); 3225 3226 if (priv_dev->onchip_buffers <= 0) { 3227 u32 reg = readl(&priv_dev->regs->usb_cap2); 3228 3229 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); 3230 } 3231 3232 if (!priv_dev->onchip_buffers) 3233 priv_dev->onchip_buffers = 256; 3234 3235 max_speed = usb_get_maximum_speed(cdns->dev); 3236 3237 /* Check the maximum_speed parameter */ 3238 switch (max_speed) { 3239 case USB_SPEED_FULL: 3240 case USB_SPEED_HIGH: 3241 case USB_SPEED_SUPER: 3242 break; 3243 default: 3244 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", 3245 max_speed); 3246 fallthrough; 3247 case USB_SPEED_UNKNOWN: 3248 /* default to superspeed */ 3249 max_speed = USB_SPEED_SUPER; 3250 break; 3251 } 3252 3253 /* fill gadget fields */ 3254 priv_dev->gadget.max_speed = max_speed; 3255 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3256 priv_dev->gadget.ops = &cdns3_gadget_ops; 3257 priv_dev->gadget.name = "usb-ss-gadget"; 3258 priv_dev->gadget.quirk_avoids_skb_reserve = 1; 3259 priv_dev->gadget.irq = cdns->dev_irq; 3260 3261 spin_lock_init(&priv_dev->lock); 3262 INIT_WORK(&priv_dev->pending_status_wq, 3263 cdns3_pending_setup_status_handler); 3264 3265 INIT_WORK(&priv_dev->aligned_buf_wq, 3266 cdns3_free_aligned_request_buf); 3267 3268 /* initialize endpoint container */ 3269 INIT_LIST_HEAD(&priv_dev->gadget.ep_list); 3270 INIT_LIST_HEAD(&priv_dev->aligned_buf_list); 3271 priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool", 3272 priv_dev->sysdev, 3273 TRB_RING_SIZE, 8, 0); 3274 if (!priv_dev->eps_dma_pool) { 3275 dev_err(priv_dev->dev, "Failed to create TRB dma pool\n"); 3276 ret = -ENOMEM; 3277 goto err1; 3278 } 3279 3280 ret = cdns3_init_eps(priv_dev); 3281 if (ret) { 3282 dev_err(priv_dev->dev, "Failed to create endpoints\n"); 3283 goto err1; 3284 } 3285 3286 /* allocate memory for setup packet buffer */ 3287 priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8, 3288 &priv_dev->setup_dma, GFP_DMA); 3289 if (!priv_dev->setup_buf) { 3290 ret = -ENOMEM; 3291 goto err2; 3292 } 3293 3294 priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6); 3295 3296 dev_dbg(priv_dev->dev, "Device Controller version: %08x\n", 3297 readl(&priv_dev->regs->usb_cap6)); 3298 dev_dbg(priv_dev->dev, "USB Capabilities:: %08x\n", 3299 readl(&priv_dev->regs->usb_cap1)); 3300 dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n", 3301 readl(&priv_dev->regs->usb_cap2)); 3302 3303 priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver); 3304 if (priv_dev->dev_ver >= DEV_VER_V2) 3305 priv_dev->gadget.sg_supported = 1; 3306 3307 priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL); 3308 if (!priv_dev->zlp_buf) { 3309 ret = -ENOMEM; 3310 goto err3; 3311 } 3312 3313 /* add USB gadget device */ 3314 ret = usb_add_gadget(&priv_dev->gadget); 3315 if (ret < 0) { 3316 dev_err(priv_dev->dev, "Failed to add gadget\n"); 3317 goto err4; 3318 } 3319 3320 return 0; 3321 err4: 3322 kfree(priv_dev->zlp_buf); 3323 err3: 3324 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3325 priv_dev->setup_dma); 3326 err2: 3327 cdns3_free_all_eps(priv_dev); 3328 err1: 3329 dma_pool_destroy(priv_dev->eps_dma_pool); 3330 3331 usb_put_gadget(&priv_dev->gadget); 3332 cdns->gadget_dev = NULL; 3333 return ret; 3334 } 3335 3336 static int __cdns3_gadget_init(struct cdns *cdns) 3337 { 3338 int ret = 0; 3339 3340 /* Ensure 32-bit DMA Mask in case we switched back from Host mode */ 3341 ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32)); 3342 if (ret) { 3343 dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret); 3344 return ret; 3345 } 3346 3347 cdns_drd_gadget_on(cdns); 3348 pm_runtime_get_sync(cdns->dev); 3349 3350 ret = cdns3_gadget_start(cdns); 3351 if (ret) { 3352 pm_runtime_put_sync(cdns->dev); 3353 return ret; 3354 } 3355 3356 /* 3357 * Because interrupt line can be shared with other components in 3358 * driver it can't use IRQF_ONESHOT flag here. 3359 */ 3360 ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, 3361 cdns3_device_irq_handler, 3362 cdns3_device_thread_irq_handler, 3363 IRQF_SHARED, dev_name(cdns->dev), 3364 cdns->gadget_dev); 3365 3366 if (ret) 3367 goto err0; 3368 3369 return 0; 3370 err0: 3371 cdns3_gadget_exit(cdns); 3372 return ret; 3373 } 3374 3375 static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup) 3376 __must_hold(&cdns->lock) 3377 { 3378 struct cdns3_device *priv_dev = cdns->gadget_dev; 3379 3380 spin_unlock(&cdns->lock); 3381 cdns3_disconnect_gadget(priv_dev); 3382 spin_lock(&cdns->lock); 3383 3384 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3385 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); 3386 cdns3_hw_reset_eps_config(priv_dev); 3387 3388 /* disable interrupt for device */ 3389 writel(0, &priv_dev->regs->usb_ien); 3390 3391 return 0; 3392 } 3393 3394 static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated) 3395 { 3396 struct cdns3_device *priv_dev = cdns->gadget_dev; 3397 3398 if (!priv_dev->gadget_driver) 3399 return 0; 3400 3401 cdns3_gadget_config(priv_dev); 3402 if (hibernated) 3403 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf); 3404 3405 return 0; 3406 } 3407 3408 /** 3409 * cdns3_gadget_init - initialize device structure 3410 * 3411 * @cdns: cdns instance 3412 * 3413 * This function initializes the gadget. 3414 */ 3415 int cdns3_gadget_init(struct cdns *cdns) 3416 { 3417 struct cdns_role_driver *rdrv; 3418 3419 rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); 3420 if (!rdrv) 3421 return -ENOMEM; 3422 3423 rdrv->start = __cdns3_gadget_init; 3424 rdrv->stop = cdns3_gadget_exit; 3425 rdrv->suspend = cdns3_gadget_suspend; 3426 rdrv->resume = cdns3_gadget_resume; 3427 rdrv->state = CDNS_ROLE_STATE_INACTIVE; 3428 rdrv->name = "gadget"; 3429 cdns->roles[USB_ROLE_DEVICE] = rdrv; 3430 3431 return 0; 3432 } 3433