1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 
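	 * The loop below polls DSTS.DCNRD in 5 us steps and gives up after
	 * 10000 tries, i.e. after roughly 50 ms.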
103 */ 104 if (dwc->revision >= DWC3_REVISION_194A) { 105 while (--retries) { 106 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 107 if (reg & DWC3_DSTS_DCNRD) 108 udelay(5); 109 else 110 break; 111 } 112 113 if (retries <= 0) 114 return -ETIMEDOUT; 115 } 116 117 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 119 120 /* set requested state */ 121 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 122 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 123 124 /* 125 * The following code is racy when called from dwc3_gadget_wakeup, 126 * and is not needed, at least on newer versions 127 */ 128 if (dwc->revision >= DWC3_REVISION_194A) 129 return 0; 130 131 /* wait for a change in DSTS */ 132 retries = 10000; 133 while (--retries) { 134 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 135 136 if (DWC3_DSTS_USBLNKST(reg) == state) 137 return 0; 138 139 udelay(5); 140 } 141 142 dwc3_trace(trace_dwc3_gadget, 143 "link state change request timed out"); 144 145 return -ETIMEDOUT; 146 } 147 148 /** 149 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case 150 * @dwc: pointer to our context structure 151 * 152 * This function will a best effort FIFO allocation in order 153 * to improve FIFO usage and throughput, while still allowing 154 * us to enable as many endpoints as possible. 155 * 156 * Keep in mind that this operation will be highly dependent 157 * on the configured size for RAM1 - which contains TxFifo -, 158 * the amount of endpoints enabled on coreConsultant tool, and 159 * the width of the Master Bus. 160 * 161 * In the ideal world, we would always be able to satisfy the 162 * following equation: 163 * 164 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \ 165 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes 166 * 167 * Unfortunately, due to many variables that's not always the case. 168 */ 169 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc) 170 { 171 int last_fifo_depth = 0; 172 int ram1_depth; 173 int fifo_size; 174 int mdwidth; 175 int num; 176 177 if (!dwc->needs_fifo_resize) 178 return 0; 179 180 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7); 181 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 182 183 /* MDWIDTH is represented in bits, we need it in bytes */ 184 mdwidth >>= 3; 185 186 /* 187 * FIXME For now we will only allocate 1 wMaxPacketSize space 188 * for each enabled endpoint, later patches will come to 189 * improve this algorithm so that we better use the internal 190 * FIFO space 191 */ 192 for (num = 0; num < dwc->num_in_eps; num++) { 193 /* bit0 indicates direction; 1 means IN ep */ 194 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1]; 195 int mult = 1; 196 int tmp; 197 198 if (!(dep->flags & DWC3_EP_ENABLED)) 199 continue; 200 201 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) 202 || usb_endpoint_xfer_isoc(dep->endpoint.desc)) 203 mult = 3; 204 205 /* 206 * REVISIT: the following assumes we will always have enough 207 * space available on the FIFO RAM for all possible use cases. 208 * Make sure that's true somehow and change FIFO allocation 209 * accordingly. 210 * 211 * If we have Bulk or Isochronous endpoints, we want 212 * them to be able to be very, very fast. 
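		 * (For instance, with a 64-bit master bus mdwidth is 8 bytes,
		 * so the three-packet allocation computed below for a
		 * 1024-byte bulk endpoint works out to
		 * DIV_ROUND_UP(3 * (1024 + 8) + 8, 8) = 388 FIFO locations.)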
So we're giving 213 * those endpoints a fifo_size which is enough for 3 full 214 * packets 215 */ 216 tmp = mult * (dep->endpoint.maxpacket + mdwidth); 217 tmp += mdwidth; 218 219 fifo_size = DIV_ROUND_UP(tmp, mdwidth); 220 221 fifo_size |= (last_fifo_depth << 16); 222 223 dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d", 224 dep->name, last_fifo_depth, fifo_size & 0xffff); 225 226 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size); 227 228 last_fifo_depth += (fifo_size & 0xffff); 229 } 230 231 return 0; 232 } 233 234 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 235 int status) 236 { 237 struct dwc3 *dwc = dep->dwc; 238 int i; 239 240 if (req->queued) { 241 i = 0; 242 do { 243 dep->busy_slot++; 244 /* 245 * Skip LINK TRB. We can't use req->trb and check for 246 * DWC3_TRBCTL_LINK_TRB because it points the TRB we 247 * just completed (not the LINK TRB). 248 */ 249 if (((dep->busy_slot & DWC3_TRB_MASK) == 250 DWC3_TRB_NUM- 1) && 251 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 252 dep->busy_slot++; 253 } while(++i < req->request.num_mapped_sgs); 254 req->queued = false; 255 } 256 list_del(&req->list); 257 req->trb = NULL; 258 259 if (req->request.status == -EINPROGRESS) 260 req->request.status = status; 261 262 if (dwc->ep0_bounced && dep->number == 0) 263 dwc->ep0_bounced = false; 264 else 265 usb_gadget_unmap_request(&dwc->gadget, &req->request, 266 req->direction); 267 268 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", 269 req, dep->name, req->request.actual, 270 req->request.length, status); 271 trace_dwc3_gadget_giveback(req); 272 273 spin_unlock(&dwc->lock); 274 usb_gadget_giveback_request(&dep->endpoint, &req->request); 275 spin_lock(&dwc->lock); 276 } 277 278 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 279 { 280 u32 timeout = 500; 281 u32 reg; 282 283 trace_dwc3_gadget_generic_cmd(cmd, param); 284 285 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 286 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 287 288 do { 289 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 290 if (!(reg & DWC3_DGCMD_CMDACT)) { 291 dwc3_trace(trace_dwc3_gadget, 292 "Command Complete --> %d", 293 DWC3_DGCMD_STATUS(reg)); 294 if (DWC3_DGCMD_STATUS(reg)) 295 return -EINVAL; 296 return 0; 297 } 298 299 /* 300 * We can't sleep here, because it's also called from 301 * interrupt context. 302 */ 303 timeout--; 304 if (!timeout) { 305 dwc3_trace(trace_dwc3_gadget, 306 "Command Timed Out"); 307 return -ETIMEDOUT; 308 } 309 udelay(1); 310 } while (1); 311 } 312 313 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 314 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) 315 { 316 struct dwc3_ep *dep = dwc->eps[ep]; 317 u32 timeout = 500; 318 u32 reg; 319 320 trace_dwc3_gadget_ep_cmd(dep, cmd, params); 321 322 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); 323 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); 324 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); 325 326 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); 327 do { 328 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 329 if (!(reg & DWC3_DEPCMD_CMDACT)) { 330 dwc3_trace(trace_dwc3_gadget, 331 "Command Complete --> %d", 332 DWC3_DEPCMD_STATUS(reg)); 333 if (DWC3_DEPCMD_STATUS(reg)) 334 return -EINVAL; 335 return 0; 336 } 337 338 /* 339 * We can't sleep here, because it is also called from 340 * interrupt context. 
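		 * Instead we busy-poll CMDACT in 1 us steps, bounded by the
		 * 500 polls counted in 'timeout' above.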
341 */ 342 timeout--; 343 if (!timeout) { 344 dwc3_trace(trace_dwc3_gadget, 345 "Command Timed Out"); 346 return -ETIMEDOUT; 347 } 348 349 udelay(1); 350 } while (1); 351 } 352 353 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 354 struct dwc3_trb *trb) 355 { 356 u32 offset = (char *) trb - (char *) dep->trb_pool; 357 358 return dep->trb_pool_dma + offset; 359 } 360 361 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 362 { 363 struct dwc3 *dwc = dep->dwc; 364 365 if (dep->trb_pool) 366 return 0; 367 368 dep->trb_pool = dma_alloc_coherent(dwc->dev, 369 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 370 &dep->trb_pool_dma, GFP_KERNEL); 371 if (!dep->trb_pool) { 372 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 373 dep->name); 374 return -ENOMEM; 375 } 376 377 return 0; 378 } 379 380 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 381 { 382 struct dwc3 *dwc = dep->dwc; 383 384 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 385 dep->trb_pool, dep->trb_pool_dma); 386 387 dep->trb_pool = NULL; 388 dep->trb_pool_dma = 0; 389 } 390 391 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 392 { 393 struct dwc3_gadget_ep_cmd_params params; 394 u32 cmd; 395 396 memset(¶ms, 0x00, sizeof(params)); 397 398 if (dep->number != 1) { 399 cmd = DWC3_DEPCMD_DEPSTARTCFG; 400 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 401 if (dep->number > 1) { 402 if (dwc->start_config_issued) 403 return 0; 404 dwc->start_config_issued = true; 405 cmd |= DWC3_DEPCMD_PARAM(2); 406 } 407 408 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 409 } 410 411 return 0; 412 } 413 414 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 415 const struct usb_endpoint_descriptor *desc, 416 const struct usb_ss_ep_comp_descriptor *comp_desc, 417 bool ignore, bool restore) 418 { 419 struct dwc3_gadget_ep_cmd_params params; 420 421 memset(¶ms, 0x00, sizeof(params)); 422 423 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 424 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 425 426 /* Burst size is only needed in SuperSpeed mode */ 427 if (dwc->gadget.speed == USB_SPEED_SUPER) { 428 u32 burst = dep->endpoint.maxburst - 1; 429 430 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); 431 } 432 433 if (ignore) 434 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM; 435 436 if (restore) { 437 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 438 params.param2 |= dep->saved_state; 439 } 440 441 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 442 | DWC3_DEPCFG_XFER_NOT_READY_EN; 443 444 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 445 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 446 | DWC3_DEPCFG_STREAM_EVENT_EN; 447 dep->stream_capable = true; 448 } 449 450 if (!usb_endpoint_xfer_control(desc)) 451 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 452 453 /* 454 * We are doing 1:1 mapping for endpoints, meaning 455 * Physical Endpoints 2 maps to Logical Endpoint 2 and 456 * so on. We consider the direction bit as part of the physical 457 * endpoint number. So USB endpoint 0x81 is 0x03. 
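	 * Likewise, USB endpoint 0x02 (OUT) maps to physical endpoint 4.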
458 */ 459 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 460 461 /* 462 * We must use the lower 16 TX FIFOs even though 463 * HW might have more 464 */ 465 if (dep->direction) 466 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 467 468 if (desc->bInterval) { 469 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 470 dep->interval = 1 << (desc->bInterval - 1); 471 } 472 473 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 474 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 475 } 476 477 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 478 { 479 struct dwc3_gadget_ep_cmd_params params; 480 481 memset(¶ms, 0x00, sizeof(params)); 482 483 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 484 485 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 486 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 487 } 488 489 /** 490 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 491 * @dep: endpoint to be initialized 492 * @desc: USB Endpoint Descriptor 493 * 494 * Caller should take care of locking 495 */ 496 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 497 const struct usb_endpoint_descriptor *desc, 498 const struct usb_ss_ep_comp_descriptor *comp_desc, 499 bool ignore, bool restore) 500 { 501 struct dwc3 *dwc = dep->dwc; 502 u32 reg; 503 int ret; 504 505 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name); 506 507 if (!(dep->flags & DWC3_EP_ENABLED)) { 508 ret = dwc3_gadget_start_config(dwc, dep); 509 if (ret) 510 return ret; 511 } 512 513 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore, 514 restore); 515 if (ret) 516 return ret; 517 518 if (!(dep->flags & DWC3_EP_ENABLED)) { 519 struct dwc3_trb *trb_st_hw; 520 struct dwc3_trb *trb_link; 521 522 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 523 if (ret) 524 return ret; 525 526 dep->endpoint.desc = desc; 527 dep->comp_desc = comp_desc; 528 dep->type = usb_endpoint_type(desc); 529 dep->flags |= DWC3_EP_ENABLED; 530 531 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 532 reg |= DWC3_DALEPENA_EP(dep->number); 533 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 534 535 if (!usb_endpoint_xfer_isoc(desc)) 536 return 0; 537 538 /* Link TRB for ISOC. 
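It loops the last slot of the TRB pool back to trb_pool[0], so the pool behaves as a ring.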
The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		memset(trb_link, 0, sizeof(*trb_link));

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number, true);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req;
	struct dwc3_ep			*dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->epnum	= dep->number;
	req->dep	= dep;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);

	trace_dwc3_free_request(req);
	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3_trb		*trb;

	dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];

	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
	}

	dep->free_slot++;
	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (!req->request.no_interrupt && !chain)
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else if (last) {
		trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	trace_dwc3_prepare_trb(dep, trb);
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request	*req, *n;
	u32			trbs_left;
	u32			max;
	unsigned int		last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy_slot and free_slot are equal, the ring is either full or
	 * empty. If we are starting to process requests then we are empty.
	 * Otherwise we are full and don't do anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use a ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * the IOC bit every TRB_NUM/4. We try to avoid having an
		 * interrupt after the first request so we start at slot 1
		 * and have 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
863 */ 864 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 865 dep->busy_slot = 1; 866 dep->free_slot = 1; 867 } else { 868 dep->busy_slot = 0; 869 dep->free_slot = 0; 870 } 871 } 872 873 /* The last TRB is a link TRB, not used for xfer */ 874 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc)) 875 return; 876 877 list_for_each_entry_safe(req, n, &dep->request_list, list) { 878 unsigned length; 879 dma_addr_t dma; 880 last_one = false; 881 882 if (req->request.num_mapped_sgs > 0) { 883 struct usb_request *request = &req->request; 884 struct scatterlist *sg = request->sg; 885 struct scatterlist *s; 886 int i; 887 888 for_each_sg(sg, s, request->num_mapped_sgs, i) { 889 unsigned chain = true; 890 891 length = sg_dma_len(s); 892 dma = sg_dma_address(s); 893 894 if (i == (request->num_mapped_sgs - 1) || 895 sg_is_last(s)) { 896 if (list_empty(&dep->request_list)) 897 last_one = true; 898 chain = false; 899 } 900 901 trbs_left--; 902 if (!trbs_left) 903 last_one = true; 904 905 if (last_one) 906 chain = false; 907 908 dwc3_prepare_one_trb(dep, req, dma, length, 909 last_one, chain, i); 910 911 if (last_one) 912 break; 913 } 914 915 if (last_one) 916 break; 917 } else { 918 dma = req->request.dma; 919 length = req->request.length; 920 trbs_left--; 921 922 if (!trbs_left) 923 last_one = 1; 924 925 /* Is this the last request? */ 926 if (list_is_last(&req->list, &dep->request_list)) 927 last_one = 1; 928 929 dwc3_prepare_one_trb(dep, req, dma, length, 930 last_one, false, 0); 931 932 if (last_one) 933 break; 934 } 935 } 936 } 937 938 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 939 int start_new) 940 { 941 struct dwc3_gadget_ep_cmd_params params; 942 struct dwc3_request *req; 943 struct dwc3 *dwc = dep->dwc; 944 int ret; 945 u32 cmd; 946 947 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 948 dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name); 949 return -EBUSY; 950 } 951 952 /* 953 * If we are getting here after a short-out-packet we don't enqueue any 954 * new requests as we try to set the IOC bit only on the last request. 955 */ 956 if (start_new) { 957 if (list_empty(&dep->req_queued)) 958 dwc3_prepare_trbs(dep, start_new); 959 960 /* req points to the first request which will be sent */ 961 req = next_request(&dep->req_queued); 962 } else { 963 dwc3_prepare_trbs(dep, start_new); 964 965 /* 966 * req points to the first request where HWO changed from 0 to 1 967 */ 968 req = next_request(&dep->req_queued); 969 } 970 if (!req) { 971 dep->flags |= DWC3_EP_PENDING_REQUEST; 972 return 0; 973 } 974 975 memset(¶ms, 0, sizeof(params)); 976 977 if (start_new) { 978 params.param0 = upper_32_bits(req->trb_dma); 979 params.param1 = lower_32_bits(req->trb_dma); 980 cmd = DWC3_DEPCMD_STARTTRANSFER; 981 } else { 982 cmd = DWC3_DEPCMD_UPDATETRANSFER; 983 } 984 985 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 986 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 987 if (ret < 0) { 988 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); 989 990 /* 991 * FIXME we need to iterate over the list of requests 992 * here and stop, unmap, free and del each of the linked 993 * requests instead of what we do now. 
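		 * For now only the single request picked above is unmapped
		 * and removed below.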
994 */ 995 usb_gadget_unmap_request(&dwc->gadget, &req->request, 996 req->direction); 997 list_del(&req->list); 998 return ret; 999 } 1000 1001 dep->flags |= DWC3_EP_BUSY; 1002 1003 if (start_new) { 1004 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc, 1005 dep->number); 1006 WARN_ON_ONCE(!dep->resource_index); 1007 } 1008 1009 return 0; 1010 } 1011 1012 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1013 struct dwc3_ep *dep, u32 cur_uf) 1014 { 1015 u32 uf; 1016 1017 if (list_empty(&dep->request_list)) { 1018 dwc3_trace(trace_dwc3_gadget, 1019 "ISOC ep %s run out for requests", 1020 dep->name); 1021 dep->flags |= DWC3_EP_PENDING_REQUEST; 1022 return; 1023 } 1024 1025 /* 4 micro frames in the future */ 1026 uf = cur_uf + dep->interval * 4; 1027 1028 __dwc3_gadget_kick_transfer(dep, uf, 1); 1029 } 1030 1031 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1032 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1033 { 1034 u32 cur_uf, mask; 1035 1036 mask = ~(dep->interval - 1); 1037 cur_uf = event->parameters & mask; 1038 1039 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1040 } 1041 1042 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1043 { 1044 struct dwc3 *dwc = dep->dwc; 1045 int ret; 1046 1047 req->request.actual = 0; 1048 req->request.status = -EINPROGRESS; 1049 req->direction = dep->direction; 1050 req->epnum = dep->number; 1051 1052 trace_dwc3_ep_queue(req); 1053 1054 /* 1055 * We only add to our list of requests now and 1056 * start consuming the list once we get XferNotReady 1057 * IRQ. 1058 * 1059 * That way, we avoid doing anything that we don't need 1060 * to do now and defer it until the point we receive a 1061 * particular token from the Host side. 1062 * 1063 * This will also avoid Host cancelling URBs due to too 1064 * many NAKs. 1065 */ 1066 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1067 dep->direction); 1068 if (ret) 1069 return ret; 1070 1071 list_add_tail(&req->list, &dep->request_list); 1072 1073 /* 1074 * If there are no pending requests and the endpoint isn't already 1075 * busy, we will just start the request straight away. 1076 * 1077 * This will save one IRQ (XFER_NOT_READY) and possibly make it a 1078 * little bit faster. 1079 */ 1080 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1081 !(dep->flags & DWC3_EP_BUSY)) { 1082 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1083 goto out; 1084 } 1085 1086 /* 1087 * There are a few special cases: 1088 * 1089 * 1. XferNotReady with empty list of requests. We need to kick the 1090 * transfer here in that situation, otherwise we will be NAKing 1091 * forever. If we get XferNotReady before gadget driver has a 1092 * chance to queue a request, we will ACK the IRQ but won't be 1093 * able to receive the data until the next request is queued. 1094 * The following code is handling exactly that. 1095 * 1096 */ 1097 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1098 /* 1099 * If xfernotready is already elapsed and it is a case 1100 * of isoc transfer, then issue END TRANSFER, so that 1101 * you can receive xfernotready again and can have 1102 * notion of current microframe. 
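		 * (dwc3_stop_active_transfer() below also clears DWC3_EP_BUSY
		 * and the endpoint's resource index for us.)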
1103 */ 1104 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1105 if (list_empty(&dep->req_queued)) { 1106 dwc3_stop_active_transfer(dwc, dep->number, true); 1107 dep->flags = DWC3_EP_ENABLED; 1108 } 1109 return 0; 1110 } 1111 1112 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1113 if (!ret) 1114 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 1115 1116 goto out; 1117 } 1118 1119 /* 1120 * 2. XferInProgress on Isoc EP with an active transfer. We need to 1121 * kick the transfer here after queuing a request, otherwise the 1122 * core may not see the modified TRB(s). 1123 */ 1124 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1125 (dep->flags & DWC3_EP_BUSY) && 1126 !(dep->flags & DWC3_EP_MISSED_ISOC)) { 1127 WARN_ON_ONCE(!dep->resource_index); 1128 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index, 1129 false); 1130 goto out; 1131 } 1132 1133 /* 1134 * 4. Stream Capable Bulk Endpoints. We need to start the transfer 1135 * right away, otherwise host will not know we have streams to be 1136 * handled. 1137 */ 1138 if (dep->stream_capable) 1139 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1140 1141 out: 1142 if (ret && ret != -EBUSY) 1143 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1144 dep->name); 1145 if (ret == -EBUSY) 1146 ret = 0; 1147 1148 return ret; 1149 } 1150 1151 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1152 gfp_t gfp_flags) 1153 { 1154 struct dwc3_request *req = to_dwc3_request(request); 1155 struct dwc3_ep *dep = to_dwc3_ep(ep); 1156 struct dwc3 *dwc = dep->dwc; 1157 1158 unsigned long flags; 1159 1160 int ret; 1161 1162 spin_lock_irqsave(&dwc->lock, flags); 1163 if (!dep->endpoint.desc) { 1164 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", 1165 request, ep->name); 1166 ret = -ESHUTDOWN; 1167 goto out; 1168 } 1169 1170 if (WARN(req->dep != dep, "request %p belongs to '%s'\n", 1171 request, req->dep->name)) { 1172 ret = -EINVAL; 1173 goto out; 1174 } 1175 1176 ret = __dwc3_gadget_ep_queue(dep, req); 1177 1178 out: 1179 spin_unlock_irqrestore(&dwc->lock, flags); 1180 1181 return ret; 1182 } 1183 1184 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1185 struct usb_request *request) 1186 { 1187 struct dwc3_request *req = to_dwc3_request(request); 1188 struct dwc3_request *r = NULL; 1189 1190 struct dwc3_ep *dep = to_dwc3_ep(ep); 1191 struct dwc3 *dwc = dep->dwc; 1192 1193 unsigned long flags; 1194 int ret = 0; 1195 1196 trace_dwc3_ep_dequeue(req); 1197 1198 spin_lock_irqsave(&dwc->lock, flags); 1199 1200 list_for_each_entry(r, &dep->request_list, list) { 1201 if (r == req) 1202 break; 1203 } 1204 1205 if (r != req) { 1206 list_for_each_entry(r, &dep->req_queued, list) { 1207 if (r == req) 1208 break; 1209 } 1210 if (r == req) { 1211 /* wait until it is processed */ 1212 dwc3_stop_active_transfer(dwc, dep->number, true); 1213 goto out1; 1214 } 1215 dev_err(dwc->dev, "request %p was not queued to %s\n", 1216 request, ep->name); 1217 ret = -EINVAL; 1218 goto out0; 1219 } 1220 1221 out1: 1222 /* giveback the request */ 1223 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1224 1225 out0: 1226 spin_unlock_irqrestore(&dwc->lock, flags); 1227 1228 return ret; 1229 } 1230 1231 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1232 { 1233 struct dwc3_gadget_ep_cmd_params params; 1234 struct dwc3 *dwc = dep->dwc; 1235 int ret; 1236 1237 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1238 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1239 return -EINVAL; 1240 } 1241 1242 
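	/*
	 * A functional (i.e. non-protocol) SetHalt is refused with -EAGAIN
	 * below while requests are still pending or in flight; the gadget
	 * driver is expected to retry once they have completed.
	 */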
memset(¶ms, 0x00, sizeof(params)); 1243 1244 if (value) { 1245 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || 1246 (!list_empty(&dep->req_queued) || 1247 !list_empty(&dep->request_list)))) { 1248 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n", 1249 dep->name); 1250 return -EAGAIN; 1251 } 1252 1253 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1254 DWC3_DEPCMD_SETSTALL, ¶ms); 1255 if (ret) 1256 dev_err(dwc->dev, "failed to set STALL on %s\n", 1257 dep->name); 1258 else 1259 dep->flags |= DWC3_EP_STALL; 1260 } else { 1261 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1262 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1263 if (ret) 1264 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1265 dep->name); 1266 else 1267 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1268 } 1269 1270 return ret; 1271 } 1272 1273 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1274 { 1275 struct dwc3_ep *dep = to_dwc3_ep(ep); 1276 struct dwc3 *dwc = dep->dwc; 1277 1278 unsigned long flags; 1279 1280 int ret; 1281 1282 spin_lock_irqsave(&dwc->lock, flags); 1283 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1284 spin_unlock_irqrestore(&dwc->lock, flags); 1285 1286 return ret; 1287 } 1288 1289 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1290 { 1291 struct dwc3_ep *dep = to_dwc3_ep(ep); 1292 struct dwc3 *dwc = dep->dwc; 1293 unsigned long flags; 1294 int ret; 1295 1296 spin_lock_irqsave(&dwc->lock, flags); 1297 dep->flags |= DWC3_EP_WEDGE; 1298 1299 if (dep->number == 0 || dep->number == 1) 1300 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1301 else 1302 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1303 spin_unlock_irqrestore(&dwc->lock, flags); 1304 1305 return ret; 1306 } 1307 1308 /* -------------------------------------------------------------------------- */ 1309 1310 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1311 .bLength = USB_DT_ENDPOINT_SIZE, 1312 .bDescriptorType = USB_DT_ENDPOINT, 1313 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1314 }; 1315 1316 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1317 .enable = dwc3_gadget_ep0_enable, 1318 .disable = dwc3_gadget_ep0_disable, 1319 .alloc_request = dwc3_gadget_ep_alloc_request, 1320 .free_request = dwc3_gadget_ep_free_request, 1321 .queue = dwc3_gadget_ep0_queue, 1322 .dequeue = dwc3_gadget_ep_dequeue, 1323 .set_halt = dwc3_gadget_ep0_set_halt, 1324 .set_wedge = dwc3_gadget_ep_set_wedge, 1325 }; 1326 1327 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1328 .enable = dwc3_gadget_ep_enable, 1329 .disable = dwc3_gadget_ep_disable, 1330 .alloc_request = dwc3_gadget_ep_alloc_request, 1331 .free_request = dwc3_gadget_ep_free_request, 1332 .queue = dwc3_gadget_ep_queue, 1333 .dequeue = dwc3_gadget_ep_dequeue, 1334 .set_halt = dwc3_gadget_ep_set_halt, 1335 .set_wedge = dwc3_gadget_ep_set_wedge, 1336 }; 1337 1338 /* -------------------------------------------------------------------------- */ 1339 1340 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1341 { 1342 struct dwc3 *dwc = gadget_to_dwc(g); 1343 u32 reg; 1344 1345 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1346 return DWC3_DSTS_SOFFN(reg); 1347 } 1348 1349 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1350 { 1351 struct dwc3 *dwc = gadget_to_dwc(g); 1352 1353 unsigned long timeout; 1354 unsigned long flags; 1355 1356 u32 reg; 1357 1358 int ret = 0; 1359 1360 u8 link_state; 1361 u8 speed; 1362 1363 spin_lock_irqsave(&dwc->lock, flags); 1364 1365 /* 1366 * According to the Databook Remote wakeup request should 1367 * be 
issued only when the device is in early suspend state. 1368 * 1369 * We can check that via USB Link State bits in DSTS register. 1370 */ 1371 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1372 1373 speed = reg & DWC3_DSTS_CONNECTSPD; 1374 if (speed == DWC3_DSTS_SUPERSPEED) { 1375 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); 1376 ret = -EINVAL; 1377 goto out; 1378 } 1379 1380 link_state = DWC3_DSTS_USBLNKST(reg); 1381 1382 switch (link_state) { 1383 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1384 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1385 break; 1386 default: 1387 dev_dbg(dwc->dev, "can't wakeup from link state %d\n", 1388 link_state); 1389 ret = -EINVAL; 1390 goto out; 1391 } 1392 1393 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1394 if (ret < 0) { 1395 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1396 goto out; 1397 } 1398 1399 /* Recent versions do this automatically */ 1400 if (dwc->revision < DWC3_REVISION_194A) { 1401 /* write zeroes to Link Change Request */ 1402 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1403 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1404 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1405 } 1406 1407 /* poll until Link State changes to ON */ 1408 timeout = jiffies + msecs_to_jiffies(100); 1409 1410 while (!time_after(jiffies, timeout)) { 1411 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1412 1413 /* in HS, means ON */ 1414 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1415 break; 1416 } 1417 1418 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1419 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1420 ret = -EINVAL; 1421 } 1422 1423 out: 1424 spin_unlock_irqrestore(&dwc->lock, flags); 1425 1426 return ret; 1427 } 1428 1429 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1430 int is_selfpowered) 1431 { 1432 struct dwc3 *dwc = gadget_to_dwc(g); 1433 unsigned long flags; 1434 1435 spin_lock_irqsave(&dwc->lock, flags); 1436 g->is_selfpowered = !!is_selfpowered; 1437 spin_unlock_irqrestore(&dwc->lock, flags); 1438 1439 return 0; 1440 } 1441 1442 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1443 { 1444 u32 reg; 1445 u32 timeout = 500; 1446 1447 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1448 if (is_on) { 1449 if (dwc->revision <= DWC3_REVISION_187A) { 1450 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1451 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1452 } 1453 1454 if (dwc->revision >= DWC3_REVISION_194A) 1455 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1456 reg |= DWC3_DCTL_RUN_STOP; 1457 1458 if (dwc->has_hibernation) 1459 reg |= DWC3_DCTL_KEEP_CONNECT; 1460 1461 dwc->pullups_connected = true; 1462 } else { 1463 reg &= ~DWC3_DCTL_RUN_STOP; 1464 1465 if (dwc->has_hibernation && !suspend) 1466 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1467 1468 dwc->pullups_connected = false; 1469 } 1470 1471 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1472 1473 do { 1474 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1475 if (is_on) { 1476 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1477 break; 1478 } else { 1479 if (reg & DWC3_DSTS_DEVCTRLHLT) 1480 break; 1481 } 1482 timeout--; 1483 if (!timeout) 1484 return -ETIMEDOUT; 1485 udelay(1); 1486 } while (1); 1487 1488 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", 1489 dwc->gadget_driver 1490 ? dwc->gadget_driver->function : "no-function", 1491 is_on ? 
"connect" : "disconnect"); 1492 1493 return 0; 1494 } 1495 1496 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1497 { 1498 struct dwc3 *dwc = gadget_to_dwc(g); 1499 unsigned long flags; 1500 int ret; 1501 1502 is_on = !!is_on; 1503 1504 spin_lock_irqsave(&dwc->lock, flags); 1505 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1506 spin_unlock_irqrestore(&dwc->lock, flags); 1507 1508 return ret; 1509 } 1510 1511 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1512 { 1513 u32 reg; 1514 1515 /* Enable all but Start and End of Frame IRQs */ 1516 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1517 DWC3_DEVTEN_EVNTOVERFLOWEN | 1518 DWC3_DEVTEN_CMDCMPLTEN | 1519 DWC3_DEVTEN_ERRTICERREN | 1520 DWC3_DEVTEN_WKUPEVTEN | 1521 DWC3_DEVTEN_ULSTCNGEN | 1522 DWC3_DEVTEN_CONNECTDONEEN | 1523 DWC3_DEVTEN_USBRSTEN | 1524 DWC3_DEVTEN_DISCONNEVTEN); 1525 1526 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1527 } 1528 1529 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1530 { 1531 /* mask all interrupts */ 1532 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1533 } 1534 1535 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1536 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1537 1538 static int dwc3_gadget_start(struct usb_gadget *g, 1539 struct usb_gadget_driver *driver) 1540 { 1541 struct dwc3 *dwc = gadget_to_dwc(g); 1542 struct dwc3_ep *dep; 1543 unsigned long flags; 1544 int ret = 0; 1545 int irq; 1546 u32 reg; 1547 1548 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1549 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1550 IRQF_SHARED, "dwc3", dwc); 1551 if (ret) { 1552 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1553 irq, ret); 1554 goto err0; 1555 } 1556 1557 spin_lock_irqsave(&dwc->lock, flags); 1558 1559 if (dwc->gadget_driver) { 1560 dev_err(dwc->dev, "%s is already bound to %s\n", 1561 dwc->gadget.name, 1562 dwc->gadget_driver->driver.name); 1563 ret = -EBUSY; 1564 goto err1; 1565 } 1566 1567 dwc->gadget_driver = driver; 1568 1569 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1570 reg &= ~(DWC3_DCFG_SPEED_MASK); 1571 1572 /** 1573 * WORKAROUND: DWC3 revision < 2.20a have an issue 1574 * which would cause metastability state on Run/Stop 1575 * bit if we try to force the IP to USB2-only mode. 
1576 * 1577 * Because of that, we cannot configure the IP to any 1578 * speed other than the SuperSpeed 1579 * 1580 * Refers to: 1581 * 1582 * STAR#9000525659: Clock Domain Crossing on DCTL in 1583 * USB 2.0 Mode 1584 */ 1585 if (dwc->revision < DWC3_REVISION_220A) { 1586 reg |= DWC3_DCFG_SUPERSPEED; 1587 } else { 1588 switch (dwc->maximum_speed) { 1589 case USB_SPEED_LOW: 1590 reg |= DWC3_DSTS_LOWSPEED; 1591 break; 1592 case USB_SPEED_FULL: 1593 reg |= DWC3_DSTS_FULLSPEED1; 1594 break; 1595 case USB_SPEED_HIGH: 1596 reg |= DWC3_DSTS_HIGHSPEED; 1597 break; 1598 case USB_SPEED_SUPER: /* FALLTHROUGH */ 1599 case USB_SPEED_UNKNOWN: /* FALTHROUGH */ 1600 default: 1601 reg |= DWC3_DSTS_SUPERSPEED; 1602 } 1603 } 1604 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1605 1606 dwc->start_config_issued = false; 1607 1608 /* Start with SuperSpeed Default */ 1609 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1610 1611 dep = dwc->eps[0]; 1612 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1613 false); 1614 if (ret) { 1615 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1616 goto err2; 1617 } 1618 1619 dep = dwc->eps[1]; 1620 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1621 false); 1622 if (ret) { 1623 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1624 goto err3; 1625 } 1626 1627 /* begin to receive SETUP packets */ 1628 dwc->ep0state = EP0_SETUP_PHASE; 1629 dwc3_ep0_out_start(dwc); 1630 1631 dwc3_gadget_enable_irq(dwc); 1632 1633 spin_unlock_irqrestore(&dwc->lock, flags); 1634 1635 return 0; 1636 1637 err3: 1638 __dwc3_gadget_ep_disable(dwc->eps[0]); 1639 1640 err2: 1641 dwc->gadget_driver = NULL; 1642 1643 err1: 1644 spin_unlock_irqrestore(&dwc->lock, flags); 1645 1646 free_irq(irq, dwc); 1647 1648 err0: 1649 return ret; 1650 } 1651 1652 static int dwc3_gadget_stop(struct usb_gadget *g) 1653 { 1654 struct dwc3 *dwc = gadget_to_dwc(g); 1655 unsigned long flags; 1656 int irq; 1657 1658 spin_lock_irqsave(&dwc->lock, flags); 1659 1660 dwc3_gadget_disable_irq(dwc); 1661 __dwc3_gadget_ep_disable(dwc->eps[0]); 1662 __dwc3_gadget_ep_disable(dwc->eps[1]); 1663 1664 dwc->gadget_driver = NULL; 1665 1666 spin_unlock_irqrestore(&dwc->lock, flags); 1667 1668 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1669 free_irq(irq, dwc); 1670 1671 return 0; 1672 } 1673 1674 static const struct usb_gadget_ops dwc3_gadget_ops = { 1675 .get_frame = dwc3_gadget_get_frame, 1676 .wakeup = dwc3_gadget_wakeup, 1677 .set_selfpowered = dwc3_gadget_set_selfpowered, 1678 .pullup = dwc3_gadget_pullup, 1679 .udc_start = dwc3_gadget_start, 1680 .udc_stop = dwc3_gadget_stop, 1681 }; 1682 1683 /* -------------------------------------------------------------------------- */ 1684 1685 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1686 u8 num, u32 direction) 1687 { 1688 struct dwc3_ep *dep; 1689 u8 i; 1690 1691 for (i = 0; i < num; i++) { 1692 u8 epnum = (i << 1) | (!!direction); 1693 1694 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1695 if (!dep) 1696 return -ENOMEM; 1697 1698 dep->dwc = dwc; 1699 dep->number = epnum; 1700 dep->direction = !!direction; 1701 dwc->eps[epnum] = dep; 1702 1703 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1704 (epnum & 1) ? 
"in" : "out"); 1705 1706 dep->endpoint.name = dep->name; 1707 1708 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); 1709 1710 if (epnum == 0 || epnum == 1) { 1711 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1712 dep->endpoint.maxburst = 1; 1713 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1714 if (!epnum) 1715 dwc->gadget.ep0 = &dep->endpoint; 1716 } else { 1717 int ret; 1718 1719 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 1720 dep->endpoint.max_streams = 15; 1721 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1722 list_add_tail(&dep->endpoint.ep_list, 1723 &dwc->gadget.ep_list); 1724 1725 ret = dwc3_alloc_trb_pool(dep); 1726 if (ret) 1727 return ret; 1728 } 1729 1730 if (epnum == 0 || epnum == 1) { 1731 dep->endpoint.caps.type_control = true; 1732 } else { 1733 dep->endpoint.caps.type_iso = true; 1734 dep->endpoint.caps.type_bulk = true; 1735 dep->endpoint.caps.type_int = true; 1736 } 1737 1738 dep->endpoint.caps.dir_in = !!direction; 1739 dep->endpoint.caps.dir_out = !direction; 1740 1741 INIT_LIST_HEAD(&dep->request_list); 1742 INIT_LIST_HEAD(&dep->req_queued); 1743 } 1744 1745 return 0; 1746 } 1747 1748 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1749 { 1750 int ret; 1751 1752 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1753 1754 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1755 if (ret < 0) { 1756 dwc3_trace(trace_dwc3_gadget, 1757 "failed to allocate OUT endpoints"); 1758 return ret; 1759 } 1760 1761 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1762 if (ret < 0) { 1763 dwc3_trace(trace_dwc3_gadget, 1764 "failed to allocate IN endpoints"); 1765 return ret; 1766 } 1767 1768 return 0; 1769 } 1770 1771 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1772 { 1773 struct dwc3_ep *dep; 1774 u8 epnum; 1775 1776 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1777 dep = dwc->eps[epnum]; 1778 if (!dep) 1779 continue; 1780 /* 1781 * Physical endpoints 0 and 1 are special; they form the 1782 * bi-directional USB endpoint 0. 1783 * 1784 * For those two physical endpoints, we don't allocate a TRB 1785 * pool nor do we add them the endpoints list. Due to that, we 1786 * shouldn't do these two operations otherwise we would end up 1787 * with all sorts of bugs when removing dwc3.ko. 1788 */ 1789 if (epnum != 0 && epnum != 1) { 1790 dwc3_free_trb_pool(dep); 1791 list_del(&dep->endpoint.ep_list); 1792 } 1793 1794 kfree(dep); 1795 } 1796 } 1797 1798 /* -------------------------------------------------------------------------- */ 1799 1800 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1801 struct dwc3_request *req, struct dwc3_trb *trb, 1802 const struct dwc3_event_depevt *event, int status) 1803 { 1804 unsigned int count; 1805 unsigned int s_pkt = 0; 1806 unsigned int trb_status; 1807 1808 trace_dwc3_complete_trb(dep, trb); 1809 1810 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1811 /* 1812 * We continue despite the error. There is not much we 1813 * can do. If we don't clean it up we loop forever. If 1814 * we skip the TRB then it gets overwritten after a 1815 * while since we use them in a ring buffer. A BUG() 1816 * would help. 
		 * Let's hope that if this occurs, someone
		 * fixes the root cause instead of looking away :)
		 */
		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
				dep->name, trb);

	count = trb->size & DWC3_TRB_SIZE_MASK;

	if (dep->direction) {
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				/*
				 * If missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued requests
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach next
				 * request in request_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the request_list.
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	/*
	 * We assume here we will always receive the entire data block
	 * which we should receive. Meaning, if we program RX to
	 * receive 4K but we receive only 2K, we assume that's all we
	 * should receive and we simply bounce the request back to the
	 * gadget driver for further processing.
	 */
	req->request.actual += req->request.length - count;
	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}

static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb		*trb;
	unsigned int		slot;
	unsigned int		i;
	int			ret;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			WARN_ON_ONCE(1);
			return 1;
		}
		i = 0;
		do {
			slot = req->start_slot + i;
			if ((slot == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		} while (++i < req->request.num_mapped_sgs);

		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
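			 * The END TRANSFER is then issued from the
			 * DWC3_EP_PENDING_REQUEST path in __dwc3_gadget_ep_queue().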
1922 */ 1923 dep->flags = DWC3_EP_PENDING_REQUEST; 1924 } else { 1925 dwc3_stop_active_transfer(dwc, dep->number, true); 1926 dep->flags = DWC3_EP_ENABLED; 1927 } 1928 return 1; 1929 } 1930 1931 return 1; 1932 } 1933 1934 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 1935 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1936 { 1937 unsigned status = 0; 1938 int clean_busy; 1939 u32 is_xfer_complete; 1940 1941 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE); 1942 1943 if (event->status & DEPEVT_STATUS_BUSERR) 1944 status = -ECONNRESET; 1945 1946 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 1947 if (clean_busy && (is_xfer_complete || 1948 usb_endpoint_xfer_isoc(dep->endpoint.desc))) 1949 dep->flags &= ~DWC3_EP_BUSY; 1950 1951 /* 1952 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 1953 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 1954 */ 1955 if (dwc->revision < DWC3_REVISION_183A) { 1956 u32 reg; 1957 int i; 1958 1959 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 1960 dep = dwc->eps[i]; 1961 1962 if (!(dep->flags & DWC3_EP_ENABLED)) 1963 continue; 1964 1965 if (!list_empty(&dep->req_queued)) 1966 return; 1967 } 1968 1969 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1970 reg |= dwc->u1u2; 1971 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1972 1973 dwc->u1u2 = 0; 1974 } 1975 1976 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1977 int ret; 1978 1979 ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete); 1980 if (!ret || ret == -EBUSY) 1981 return; 1982 } 1983 } 1984 1985 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 1986 const struct dwc3_event_depevt *event) 1987 { 1988 struct dwc3_ep *dep; 1989 u8 epnum = event->endpoint_number; 1990 1991 dep = dwc->eps[epnum]; 1992 1993 if (!(dep->flags & DWC3_EP_ENABLED)) 1994 return; 1995 1996 if (epnum == 0 || epnum == 1) { 1997 dwc3_ep0_interrupt(dwc, event); 1998 return; 1999 } 2000 2001 switch (event->endpoint_event) { 2002 case DWC3_DEPEVT_XFERCOMPLETE: 2003 dep->resource_index = 0; 2004 2005 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2006 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", 2007 dep->name); 2008 return; 2009 } 2010 2011 dwc3_endpoint_transfer_complete(dwc, dep, event); 2012 break; 2013 case DWC3_DEPEVT_XFERINPROGRESS: 2014 dwc3_endpoint_transfer_complete(dwc, dep, event); 2015 break; 2016 case DWC3_DEPEVT_XFERNOTREADY: 2017 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2018 dwc3_gadget_start_isoc(dwc, dep, event); 2019 } else { 2020 int active; 2021 int ret; 2022 2023 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE; 2024 2025 dwc3_trace(trace_dwc3_gadget, "%s: reason %s", 2026 dep->name, active ? 
"Transfer Active" 2027 : "Transfer Not Active"); 2028 2029 ret = __dwc3_gadget_kick_transfer(dep, 0, !active); 2030 if (!ret || ret == -EBUSY) 2031 return; 2032 2033 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 2034 dep->name); 2035 } 2036 2037 break; 2038 case DWC3_DEPEVT_STREAMEVT: 2039 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 2040 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 2041 dep->name); 2042 return; 2043 } 2044 2045 switch (event->status) { 2046 case DEPEVT_STREAMEVT_FOUND: 2047 dwc3_trace(trace_dwc3_gadget, 2048 "Stream %d found and started", 2049 event->parameters); 2050 2051 break; 2052 case DEPEVT_STREAMEVT_NOTFOUND: 2053 /* FALLTHROUGH */ 2054 default: 2055 dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); 2056 } 2057 break; 2058 case DWC3_DEPEVT_RXTXFIFOEVT: 2059 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); 2060 break; 2061 case DWC3_DEPEVT_EPCMDCMPLT: 2062 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete"); 2063 break; 2064 } 2065 } 2066 2067 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2068 { 2069 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2070 spin_unlock(&dwc->lock); 2071 dwc->gadget_driver->disconnect(&dwc->gadget); 2072 spin_lock(&dwc->lock); 2073 } 2074 } 2075 2076 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2077 { 2078 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2079 spin_unlock(&dwc->lock); 2080 dwc->gadget_driver->suspend(&dwc->gadget); 2081 spin_lock(&dwc->lock); 2082 } 2083 } 2084 2085 static void dwc3_resume_gadget(struct dwc3 *dwc) 2086 { 2087 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2088 spin_unlock(&dwc->lock); 2089 dwc->gadget_driver->resume(&dwc->gadget); 2090 spin_lock(&dwc->lock); 2091 } 2092 } 2093 2094 static void dwc3_reset_gadget(struct dwc3 *dwc) 2095 { 2096 if (!dwc->gadget_driver) 2097 return; 2098 2099 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2100 spin_unlock(&dwc->lock); 2101 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2102 spin_lock(&dwc->lock); 2103 } 2104 } 2105 2106 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2107 { 2108 struct dwc3_ep *dep; 2109 struct dwc3_gadget_ep_cmd_params params; 2110 u32 cmd; 2111 int ret; 2112 2113 dep = dwc->eps[epnum]; 2114 2115 if (!dep->resource_index) 2116 return; 2117 2118 /* 2119 * NOTICE: We are violating what the Databook says about the 2120 * EndTransfer command. Ideally we would _always_ wait for the 2121 * EndTransfer Command Completion IRQ, but that's causing too 2122 * much trouble synchronizing between us and gadget driver. 2123 * 2124 * We have discussed this with the IP Provider and it was 2125 * suggested to giveback all requests here, but give HW some 2126 * extra time to synchronize with the interconnect. We're using 2127 * an arbitrary 100us delay for that. 2128 * 2129 * Note also that a similar handling was tested by Synopsys 2130 * (thanks a lot Paul) and nothing bad has come out of it. 2131 * In short, what we're doing is: 2132 * 2133 * - Issue EndTransfer WITH CMDIOC bit set 2134 * - Wait 100us 2135 */ 2136 2137 cmd = DWC3_DEPCMD_ENDTRANSFER; 2138 cmd |= force ? 
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
	dep->flags &= ~DWC3_EP_BUSY;
	udelay(100);
}

static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		struct dwc3_gadget_ep_cmd_params params;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_disconnect_gadget(dwc);
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify the gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. The flag gets set whenever we have an XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS, though it is not entirely clear
	 * why this is needed. It may become part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A)
			&& (speed != DWC3_DCFG_SUPERSPEED)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);

		/*
		 * When the dwc3 revision is >= 2.40a, the LPM Erratum is
		 * enabled, and DCFG.LPMCap is set, the core responds with
		 * an ACK if the BESL value in the LPM token is less than
		 * or equal to the LPM NYET threshold.
		 */
		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
				&& dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	} else {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn if required.
	 *
	 * In both cases the reset values should be sufficient.
	 */
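	/*
	 * A minimal sketch of what such a PHY tweak could look like if a
	 * platform ever needs one here (illustration only, assuming the
	 * DWC3_GUSB3PIPECTL() and DWC3_GUSB3PIPECTL_SUSPHY definitions
	 * from core.h; TxFIFO sizing is normally left to
	 * dwc3_gadget_resize_tx_fifos()):
	 *
	 *	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
	 *	reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
	 *	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
	 */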
}

static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	dwc->gadget_driver->resume(&dwc->gadget);
}

static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 revisions < 2.50a, when configured without
	 * Hibernation mode enabled, have an issue which shows up when the
	 * device detects a host-initiated U3 exit.
	 *
	 * In that case, the device will generate a Link State Change
	 * Interrupt from U3 to RESUME, which is only necessary if
	 * Hibernation is configured in.
	 *
	 * There are no functional changes due to such a spurious event and
	 * we just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dwc3_trace(trace_dwc3_gadget,
					"ignoring transition U3 -> Resume");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 revisions < 1.83a have an issue where, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround; the second half lives
	 * in dwc3_endpoint_transfer_complete().
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * has a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, the core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dwc3_trace(trace_dwc3_gadget, "Erratic Error");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dwc3_trace(trace_dwc3_gadget, "Command Complete");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dwc3_trace(trace_dwc3_gadget, "Overflow");
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw);

	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		return dwc3_endpoint_interrupt(dwc, &event->depevt);
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT what to do with Carkit and I2C events ? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}

static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	evt = dwc->ev_buffs[buf];
	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around to the next entry in steps of 4
		 * bytes, which is correct for almost all entries. There
		 * is one entry which has 12 bytes: a regular entry
		 * followed by 8 bytes of data. At the moment it is not
		 * clear how things are laid out when such an entry lands
		 * near the end of the buffer, so worry about that once
		 * we try to handle it.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return ret;
}

static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	int i;

	spin_lock_irqsave(&dwc->lock, flags);

	for (i = 0; i < dwc->num_event_buffers; i++)
		ret |= dwc3_process_event_buf(dwc, i);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	u32 count;
	u32 reg;

	evt = dwc->ev_buffs[buf];

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	int i;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < dwc->num_event_buffers; i++) {
		irqreturn_t status;

		status = dwc3_check_event_buf(dwc, i);
		if (status == IRQ_WAKE_THREAD)
			ret = status;
	}

	return ret;
}
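
/*
 * dwc3_interrupt() is the hard IRQ (top) half: it only latches the event
 * count and masks the event buffer interrupt, while dwc3_thread_interrupt()
 * drains the buffers under dwc->lock and unmasks it again. The two halves
 * are meant to be wired together as a threaded interrupt; a rough sketch of
 * that registration (illustration only; the real call site lives in the
 * gadget start path and may use different flags or naming) would be:
 *
 *	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
 *			IRQF_SHARED, "dwc3", dwc);
 */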

/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success, otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret;

	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
			GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dwc->gadget.ops			= &dwc3_gadget_ops;
	dwc->gadget.max_speed		= USB_SPEED_SUPER;
	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
	dwc->gadget.sg_supported	= true;
	dwc->gadget.name		= "dwc3-gadget";

	/*
	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
	 * on ep out.
	 */
	dwc->gadget.quirk_ep_out_aligned_size = true;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */
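	/*
	 * A minimal sketch of what that clearing could look like
	 * (illustration only, assuming the event buffers were already set
	 * up by the core and reusing the existing GEVNTCOUNT accessors;
	 * writing the pending byte count back acknowledges those events):
	 *
	 *	u32 i, count;
	 *
	 *	for (i = 0; i < dwc->num_event_buffers; i++) {
	 *		count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(i));
	 *		count &= DWC3_GEVNTCOUNT_MASK;
	 *		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(i), count);
	 *	}
	 */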

	ret = dwc3_gadget_init_endpoints(dwc);
	if (ret)
		goto err4;

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err4;
	}

	return 0;

err4:
	dwc3_gadget_free_endpoints(dwc);
	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

err3:
	kfree(dwc->setup_buf);

err2:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			dwc->ep0_trb, dwc->ep0_trb_addr);

err1:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

err0:
	return ret;
}

/* -------------------------------------------------------------------------- */

void dwc3_gadget_exit(struct dwc3 *dwc)
{
	usb_del_gadget_udc(&dwc->gadget);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

	kfree(dwc->setup_buf);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);
}

int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	if (dwc->pullups_connected) {
		dwc3_gadget_disable_irq(dwc);
		dwc3_gadget_run_stop(dwc, true, true);
	}

	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);

	return 0;
}

int dwc3_gadget_resume(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret)
		goto err0;

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret)
		goto err1;

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);

	if (dwc->pullups_connected) {
		dwc3_gadget_enable_irq(dwc);
		dwc3_gadget_run_stop(dwc, true, false);
	}

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}
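
/*
 * dwc3_gadget_suspend() and dwc3_gadget_resume() have no callers in this
 * file; they are intended for the core glue's system PM path. A rough
 * sketch of the expected wiring (illustration only; the actual call sites
 * live in core.c, depend on dwc->dr_mode, and may differ between kernel
 * versions):
 *
 *	spin_lock_irqsave(&dwc->lock, flags);
 *	dwc3_gadget_suspend(dwc);
 *	spin_unlock_irqrestore(&dwc->lock, flags);
 */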