/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if wrong Test Selector
 * is passed
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		/* TstCtrl field of DCTL starts at bit 1 */
		reg |= mode << 1;
		break;
	default:
		/* unknown Test Selector */
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* wait for a change in DSTS; ~50ms worst case (10000 * 5us) */
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the amount of endpoints enabled on coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int last_fifo_depth = 0;
	int ram1_depth;
	int fifo_size;
	int mdwidth;
	int num;

	if (!dwc->needs_fifo_resize)
		return 0;

	/* NOTE(review): ram1_depth is read but not used below — TODO confirm */
	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
		struct dwc3_ep *dep = dwc->eps[num];
		int fifo_number = dep->number >> 1;
		int mult = 1;
		int tmp;

		/* only IN endpoints (odd physical numbers) own a TxFIFO */
		if (!(dep->number & 1))
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		/* GTXFIFOSIZ: depth in low 16 bits, start address in high 16 */
		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
				fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}

/*
 * dwc3_gadget_giveback - hand a completed/cancelled request back to the gadget
 * driver, updating busy_slot bookkeeping and calling ->complete() unlocked.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	if (req->queued) {
		if (req->request.num_mapped_sgs)
			dep->busy_slot += req->request.num_mapped_sgs;
		else
			dep->busy_slot++;

		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
		 * completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
			dep->busy_slot++;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	usb_gadget_unmap_request(&dwc->gadget, &req->request,
			req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	/* drop the lock: the completion callback may re-queue requests */
	spin_unlock(&dwc->lock);
	req->request.complete(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

/* map an endpoint command opcode to a human-readable name for debug logs */
static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
{
	switch (cmd) {
	case DWC3_DEPCMD_DEPSTARTCFG:
		return "Start New Configuration";
	case DWC3_DEPCMD_ENDTRANSFER:
		return "End Transfer";
	case DWC3_DEPCMD_UPDATETRANSFER:
		return "Update Transfer";
	case DWC3_DEPCMD_STARTTRANSFER:
		return "Start Transfer";
	case DWC3_DEPCMD_CLEARSTALL:
		return "Clear Stall";
	case DWC3_DEPCMD_SETSTALL:
		return "Set Stall";
	case DWC3_DEPCMD_GETSEQNUMBER:
		return "Get Data Sequence Number";
	case DWC3_DEPCMD_SETTRANSFRESOURCE:
		return "Set Endpoint Transfer Resource";
	case DWC3_DEPCMD_SETEPCONFIG:
		return "Set Endpoint Configuration";
	default:
		return "UNKNOWN command";
	}
}

/*
 * dwc3_send_gadget_generic_command - issue a device generic command (DGCMD)
 * and busy-wait (up to ~500us) for completion. Returns 0 or -ETIMEDOUT.
 */
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
{
	u32 timeout = 500;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DGCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

/*
 * dwc3_send_gadget_ep_cmd - issue an endpoint command (DEPCMD) with the given
 * parameter block and busy-wait for completion. Returns 0 or -ETIMEDOUT.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep *dep = dwc->eps[ep];
	u32 timeout = 500;
	u32 reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

/* translate a TRB's CPU address within the pool to its DMA address */
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

/* allocate the per-endpoint coherent TRB pool (idempotent; skipped for ep0) */
static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	/* physical endpoints 0 and 1 (ep0 out/in) don't use a TRB pool */
	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

/*
 * dwc3_gadget_start_config - issue DEPSTARTCFG when needed. For non-ep0
 * endpoints it is issued only once per configuration (start_config_issued).
 */
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

/*
 * dwc3_gadget_set_ep_config - program endpoint characteristics (type, maxp,
 * burst, FIFO number, interval, stream/event enables) via DEPCFG.
 */
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		|
DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)) 417 | DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst); 418 419 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 420 | DWC3_DEPCFG_XFER_NOT_READY_EN; 421 422 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 423 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 424 | DWC3_DEPCFG_STREAM_EVENT_EN; 425 dep->stream_capable = true; 426 } 427 428 if (usb_endpoint_xfer_isoc(desc)) 429 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 430 431 /* 432 * We are doing 1:1 mapping for endpoints, meaning 433 * Physical Endpoints 2 maps to Logical Endpoint 2 and 434 * so on. We consider the direction bit as part of the physical 435 * endpoint number. So USB endpoint 0x81 is 0x03. 436 */ 437 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 438 439 /* 440 * We must use the lower 16 TX FIFOs even though 441 * HW might have more 442 */ 443 if (dep->direction) 444 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 445 446 if (desc->bInterval) { 447 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 448 dep->interval = 1 << (desc->bInterval - 1); 449 } 450 451 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 452 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 453 } 454 455 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 456 { 457 struct dwc3_gadget_ep_cmd_params params; 458 459 memset(¶ms, 0x00, sizeof(params)); 460 461 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 462 463 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 464 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 465 } 466 467 /** 468 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 469 * @dep: endpoint to be initialized 470 * @desc: USB Endpoint Descriptor 471 * 472 * Caller should take care of locking 473 */ 474 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 475 const struct usb_endpoint_descriptor *desc, 476 const struct usb_ss_ep_comp_descriptor *comp_desc) 477 { 478 struct dwc3 *dwc = dep->dwc; 479 u32 reg; 480 int 
ret = -ENOMEM; 481 482 if (!(dep->flags & DWC3_EP_ENABLED)) { 483 ret = dwc3_gadget_start_config(dwc, dep); 484 if (ret) 485 return ret; 486 } 487 488 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc); 489 if (ret) 490 return ret; 491 492 if (!(dep->flags & DWC3_EP_ENABLED)) { 493 struct dwc3_trb *trb_st_hw; 494 struct dwc3_trb *trb_link; 495 496 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 497 if (ret) 498 return ret; 499 500 dep->endpoint.desc = desc; 501 dep->comp_desc = comp_desc; 502 dep->type = usb_endpoint_type(desc); 503 dep->flags |= DWC3_EP_ENABLED; 504 505 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 506 reg |= DWC3_DALEPENA_EP(dep->number); 507 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 508 509 if (!usb_endpoint_xfer_isoc(desc)) 510 return 0; 511 512 memset(&trb_link, 0, sizeof(trb_link)); 513 514 /* Link TRB for ISOC. The HWO bit is never reset */ 515 trb_st_hw = &dep->trb_pool[0]; 516 517 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 518 519 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 520 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 521 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 522 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 523 } 524 525 return 0; 526 } 527 528 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum); 529 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 530 { 531 struct dwc3_request *req; 532 533 if (!list_empty(&dep->req_queued)) 534 dwc3_stop_active_transfer(dwc, dep->number); 535 536 while (!list_empty(&dep->request_list)) { 537 req = next_request(&dep->request_list); 538 539 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 540 } 541 } 542 543 /** 544 * __dwc3_gadget_ep_disable - Disables a HW endpoint 545 * @dep: the endpoint to disable 546 * 547 * This function also removes requests which are currently processed ny the 548 * hardware and those which are not yet scheduled. 549 * Caller should take care of locking. 
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_remove_requests(dwc, dep);

	/* clear this endpoint's bit in the Device Active Endpoint bitmap */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

/* ep0 is managed internally by the driver; gadget drivers may not enable it */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

/* likewise, ep0 may not be disabled by gadget drivers */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

/* usb_ep_ops.enable: validate the descriptor, then enable the HW endpoint */
static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* append the transfer type to the name, e.g. "ep1in" -> "ep1in-bulk" */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		/* NOTE(review): no error return here — enable proceeds anyway */
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_ep_ops.disable: restore the plain endpoint name and disable in HW */
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	/* undo the "-bulk"/"-isoc"/... suffix added by ep_enable */
	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req) {
		dev_err(dwc->dev, "not enough memory\n");
		return NULL;
	}

	req->epnum = dep->number;
	req->dep = dep;

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_trb *trb;

	unsigned int cur_slot;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* claim the next free slot in the ring */
	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	cur_slot = dep->free_slot;
	dep->free_slot++;

	/* Skip the LINK-TRB on ISOC */
	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	if (!req->trb) {
		/* first TRB of this request: move it to the queued list */
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
			trb->ctrl |= DWC3_TRB_CTRL_IOC;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else {
		if (chain)
			trb->ctrl |= DWC3_TRB_CTRL_CHN;

		if (last)
			trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	/* set HWO last: this hands the TRB over to the controller */
	trb->ctrl |= DWC3_TRB_CTRL_HWO;
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests
 * are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request *req, *n;
	u32 trbs_left;
	u32 max;
	unsigned int last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy & slot are equal than it is either full or empty. If we are
	 * starting to process requests then we are empty. Otherwise we are
	 * full and don't do anything
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned length;
		dma_addr_t dma;

		if (req->request.num_mapped_sgs > 0) {
			/* scatter-gather request: one TRB per mapped sg entry */
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain);

				if (last_one)
					break;
			}
		} else {
			/* linear request: a single TRB */
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request?
 */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false);

			if (last_one)
				break;
		}
	}
}

/*
 * __dwc3_gadget_kick_transfer - prepare TRBs and issue Start/Update Transfer.
 * @cmd_param: transfer resource index (0 when starting a new transfer)
 * @start_new: nonzero to issue STARTTRANSFER, zero for UPDATETRANSFER
 *
 * Returns 0 on success, -EBUSY if a new transfer is requested while the
 * endpoint is busy, or the endpoint-command error otherwise.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	u32 cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		/* nothing to send yet; remember to kick when one arrives */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(req->trb_dma);
	params.param1 = lower_32_bits(req->trb_dma);

	if (start_new)
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	else
		cmd = DWC3_DEPCMD_UPDATETRANSFER;

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (start_new) {
		/* remember the transfer resource index for later updates */
		dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
				dep->number);
		WARN_ON_ONCE(!dep->res_trans_idx);
	}

	return 0;
}

/*
 * __dwc3_gadget_ep_queue - map and enqueue a request; transfers are kicked
 * lazily on XferNotReady unless one of the special cases below applies.
 * Caller holds dwc->lock.
 */
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;
	int ret;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->direction = dep->direction;
	req->epnum = dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY))
		dep->flags |= DWC3_EP_PENDING_REQUEST;

	/*
	 * There are two special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
1014 */ 1015 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1016 int ret; 1017 int start_trans = 1; 1018 u8 trans_idx = dep->res_trans_idx; 1019 1020 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1021 (dep->flags & DWC3_EP_BUSY)) { 1022 start_trans = 0; 1023 WARN_ON_ONCE(!trans_idx); 1024 } else { 1025 trans_idx = 0; 1026 } 1027 1028 ret = __dwc3_gadget_kick_transfer(dep, trans_idx, start_trans); 1029 if (ret && ret != -EBUSY) { 1030 struct dwc3 *dwc = dep->dwc; 1031 1032 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1033 dep->name); 1034 } 1035 }; 1036 1037 return 0; 1038 } 1039 1040 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1041 gfp_t gfp_flags) 1042 { 1043 struct dwc3_request *req = to_dwc3_request(request); 1044 struct dwc3_ep *dep = to_dwc3_ep(ep); 1045 struct dwc3 *dwc = dep->dwc; 1046 1047 unsigned long flags; 1048 1049 int ret; 1050 1051 if (!dep->endpoint.desc) { 1052 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", 1053 request, ep->name); 1054 return -ESHUTDOWN; 1055 } 1056 1057 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n", 1058 request, ep->name, request->length); 1059 1060 spin_lock_irqsave(&dwc->lock, flags); 1061 ret = __dwc3_gadget_ep_queue(dep, req); 1062 spin_unlock_irqrestore(&dwc->lock, flags); 1063 1064 return ret; 1065 } 1066 1067 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1068 struct usb_request *request) 1069 { 1070 struct dwc3_request *req = to_dwc3_request(request); 1071 struct dwc3_request *r = NULL; 1072 1073 struct dwc3_ep *dep = to_dwc3_ep(ep); 1074 struct dwc3 *dwc = dep->dwc; 1075 1076 unsigned long flags; 1077 int ret = 0; 1078 1079 spin_lock_irqsave(&dwc->lock, flags); 1080 1081 list_for_each_entry(r, &dep->request_list, list) { 1082 if (r == req) 1083 break; 1084 } 1085 1086 if (r != req) { 1087 list_for_each_entry(r, &dep->req_queued, list) { 1088 if (r == req) 1089 break; 1090 } 1091 if (r == req) { 1092 /* wait until it is processed */ 1093 
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out0;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/*
 * __dwc3_gadget_ep_set_halt - set or clear STALL on an endpoint via the
 * SETSTALL/CLEARSTALL endpoint commands. Caller should take care of locking.
 */
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	memset(&params, 0x00, sizeof(params));

	if (value) {
		if (dep->number == 0 || dep->number == 1) {
			/*
			 * Whenever EP0 is stalled, we will restart
			 * the state machine, thus moving back to
			 * Setup Phase
			 */
			dwc->ep0state = EP0_SETUP_PHASE;
		}

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		/* a wedged endpoint stays halted until the host clears it */
		if (dep->flags & DWC3_EP_WEDGE)
			return 0;

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ?
"set" : "clear", 1146 dep->name); 1147 else 1148 dep->flags &= ~DWC3_EP_STALL; 1149 } 1150 1151 return ret; 1152 } 1153 1154 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1155 { 1156 struct dwc3_ep *dep = to_dwc3_ep(ep); 1157 struct dwc3 *dwc = dep->dwc; 1158 1159 unsigned long flags; 1160 1161 int ret; 1162 1163 spin_lock_irqsave(&dwc->lock, flags); 1164 1165 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1166 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1167 ret = -EINVAL; 1168 goto out; 1169 } 1170 1171 ret = __dwc3_gadget_ep_set_halt(dep, value); 1172 out: 1173 spin_unlock_irqrestore(&dwc->lock, flags); 1174 1175 return ret; 1176 } 1177 1178 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1179 { 1180 struct dwc3_ep *dep = to_dwc3_ep(ep); 1181 struct dwc3 *dwc = dep->dwc; 1182 unsigned long flags; 1183 1184 spin_lock_irqsave(&dwc->lock, flags); 1185 dep->flags |= DWC3_EP_WEDGE; 1186 spin_unlock_irqrestore(&dwc->lock, flags); 1187 1188 return dwc3_gadget_ep_set_halt(ep, 1); 1189 } 1190 1191 /* -------------------------------------------------------------------------- */ 1192 1193 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1194 .bLength = USB_DT_ENDPOINT_SIZE, 1195 .bDescriptorType = USB_DT_ENDPOINT, 1196 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1197 }; 1198 1199 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1200 .enable = dwc3_gadget_ep0_enable, 1201 .disable = dwc3_gadget_ep0_disable, 1202 .alloc_request = dwc3_gadget_ep_alloc_request, 1203 .free_request = dwc3_gadget_ep_free_request, 1204 .queue = dwc3_gadget_ep0_queue, 1205 .dequeue = dwc3_gadget_ep_dequeue, 1206 .set_halt = dwc3_gadget_ep_set_halt, 1207 .set_wedge = dwc3_gadget_ep_set_wedge, 1208 }; 1209 1210 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1211 .enable = dwc3_gadget_ep_enable, 1212 .disable = dwc3_gadget_ep_disable, 1213 .alloc_request = dwc3_gadget_ep_alloc_request, 1214 .free_request = 
dwc3_gadget_ep_free_request, 1215 .queue = dwc3_gadget_ep_queue, 1216 .dequeue = dwc3_gadget_ep_dequeue, 1217 .set_halt = dwc3_gadget_ep_set_halt, 1218 .set_wedge = dwc3_gadget_ep_set_wedge, 1219 }; 1220 1221 /* -------------------------------------------------------------------------- */ 1222 1223 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1224 { 1225 struct dwc3 *dwc = gadget_to_dwc(g); 1226 u32 reg; 1227 1228 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1229 return DWC3_DSTS_SOFFN(reg); 1230 } 1231 1232 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1233 { 1234 struct dwc3 *dwc = gadget_to_dwc(g); 1235 1236 unsigned long timeout; 1237 unsigned long flags; 1238 1239 u32 reg; 1240 1241 int ret = 0; 1242 1243 u8 link_state; 1244 u8 speed; 1245 1246 spin_lock_irqsave(&dwc->lock, flags); 1247 1248 /* 1249 * According to the Databook Remote wakeup request should 1250 * be issued only when the device is in early suspend state. 1251 * 1252 * We can check that via USB Link State bits in DSTS register. 
1253 */ 1254 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1255 1256 speed = reg & DWC3_DSTS_CONNECTSPD; 1257 if (speed == DWC3_DSTS_SUPERSPEED) { 1258 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); 1259 ret = -EINVAL; 1260 goto out; 1261 } 1262 1263 link_state = DWC3_DSTS_USBLNKST(reg); 1264 1265 switch (link_state) { 1266 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1267 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1268 break; 1269 default: 1270 dev_dbg(dwc->dev, "can't wakeup from link state %d\n", 1271 link_state); 1272 ret = -EINVAL; 1273 goto out; 1274 } 1275 1276 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1277 if (ret < 0) { 1278 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1279 goto out; 1280 } 1281 1282 /* write zeroes to Link Change Request */ 1283 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1284 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1285 1286 /* poll until Link State changes to ON */ 1287 timeout = jiffies + msecs_to_jiffies(100); 1288 1289 while (!time_after(jiffies, timeout)) { 1290 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1291 1292 /* in HS, means ON */ 1293 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1294 break; 1295 } 1296 1297 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1298 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1299 ret = -EINVAL; 1300 } 1301 1302 out: 1303 spin_unlock_irqrestore(&dwc->lock, flags); 1304 1305 return ret; 1306 } 1307 1308 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1309 int is_selfpowered) 1310 { 1311 struct dwc3 *dwc = gadget_to_dwc(g); 1312 unsigned long flags; 1313 1314 spin_lock_irqsave(&dwc->lock, flags); 1315 dwc->is_selfpowered = !!is_selfpowered; 1316 spin_unlock_irqrestore(&dwc->lock, flags); 1317 1318 return 0; 1319 } 1320 1321 static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on) 1322 { 1323 u32 reg; 1324 u32 timeout = 500; 1325 1326 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1327 if (is_on) { 1328 reg &= 
~DWC3_DCTL_TRGTULST_MASK; 1329 reg |= (DWC3_DCTL_RUN_STOP 1330 | DWC3_DCTL_TRGTULST_RX_DET); 1331 } else { 1332 reg &= ~DWC3_DCTL_RUN_STOP; 1333 } 1334 1335 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1336 1337 do { 1338 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1339 if (is_on) { 1340 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1341 break; 1342 } else { 1343 if (reg & DWC3_DSTS_DEVCTRLHLT) 1344 break; 1345 } 1346 timeout--; 1347 if (!timeout) 1348 break; 1349 udelay(1); 1350 } while (1); 1351 1352 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n", 1353 dwc->gadget_driver 1354 ? dwc->gadget_driver->function : "no-function", 1355 is_on ? "connect" : "disconnect"); 1356 } 1357 1358 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1359 { 1360 struct dwc3 *dwc = gadget_to_dwc(g); 1361 unsigned long flags; 1362 1363 is_on = !!is_on; 1364 1365 spin_lock_irqsave(&dwc->lock, flags); 1366 dwc3_gadget_run_stop(dwc, is_on); 1367 spin_unlock_irqrestore(&dwc->lock, flags); 1368 1369 return 0; 1370 } 1371 1372 static int dwc3_gadget_start(struct usb_gadget *g, 1373 struct usb_gadget_driver *driver) 1374 { 1375 struct dwc3 *dwc = gadget_to_dwc(g); 1376 struct dwc3_ep *dep; 1377 unsigned long flags; 1378 int ret = 0; 1379 u32 reg; 1380 1381 spin_lock_irqsave(&dwc->lock, flags); 1382 1383 if (dwc->gadget_driver) { 1384 dev_err(dwc->dev, "%s is already bound to %s\n", 1385 dwc->gadget.name, 1386 dwc->gadget_driver->driver.name); 1387 ret = -EBUSY; 1388 goto err0; 1389 } 1390 1391 dwc->gadget_driver = driver; 1392 dwc->gadget.dev.driver = &driver->driver; 1393 1394 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1395 reg &= ~(DWC3_DCFG_SPEED_MASK); 1396 1397 /** 1398 * WORKAROUND: DWC3 revision < 2.20a have an issue 1399 * which would cause metastability state on Run/Stop 1400 * bit if we try to force the IP to USB2-only mode. 
1401 * 1402 * Because of that, we cannot configure the IP to any 1403 * speed other than the SuperSpeed 1404 * 1405 * Refers to: 1406 * 1407 * STAR#9000525659: Clock Domain Crossing on DCTL in 1408 * USB 2.0 Mode 1409 */ 1410 if (dwc->revision < DWC3_REVISION_220A) 1411 reg |= DWC3_DCFG_SUPERSPEED; 1412 else 1413 reg |= dwc->maximum_speed; 1414 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1415 1416 dwc->start_config_issued = false; 1417 1418 /* Start with SuperSpeed Default */ 1419 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1420 1421 dep = dwc->eps[0]; 1422 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); 1423 if (ret) { 1424 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1425 goto err0; 1426 } 1427 1428 dep = dwc->eps[1]; 1429 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); 1430 if (ret) { 1431 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1432 goto err1; 1433 } 1434 1435 /* begin to receive SETUP packets */ 1436 dwc->ep0state = EP0_SETUP_PHASE; 1437 dwc3_ep0_out_start(dwc); 1438 1439 spin_unlock_irqrestore(&dwc->lock, flags); 1440 1441 return 0; 1442 1443 err1: 1444 __dwc3_gadget_ep_disable(dwc->eps[0]); 1445 1446 err0: 1447 spin_unlock_irqrestore(&dwc->lock, flags); 1448 1449 return ret; 1450 } 1451 1452 static int dwc3_gadget_stop(struct usb_gadget *g, 1453 struct usb_gadget_driver *driver) 1454 { 1455 struct dwc3 *dwc = gadget_to_dwc(g); 1456 unsigned long flags; 1457 1458 spin_lock_irqsave(&dwc->lock, flags); 1459 1460 __dwc3_gadget_ep_disable(dwc->eps[0]); 1461 __dwc3_gadget_ep_disable(dwc->eps[1]); 1462 1463 dwc->gadget_driver = NULL; 1464 dwc->gadget.dev.driver = NULL; 1465 1466 spin_unlock_irqrestore(&dwc->lock, flags); 1467 1468 return 0; 1469 } 1470 static const struct usb_gadget_ops dwc3_gadget_ops = { 1471 .get_frame = dwc3_gadget_get_frame, 1472 .wakeup = dwc3_gadget_wakeup, 1473 .set_selfpowered = dwc3_gadget_set_selfpowered, 1474 .pullup = dwc3_gadget_pullup, 1475 .udc_start = 
dwc3_gadget_start, 1476 .udc_stop = dwc3_gadget_stop, 1477 }; 1478 1479 /* -------------------------------------------------------------------------- */ 1480 1481 static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1482 { 1483 struct dwc3_ep *dep; 1484 u8 epnum; 1485 1486 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1487 1488 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1489 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1490 if (!dep) { 1491 dev_err(dwc->dev, "can't allocate endpoint %d\n", 1492 epnum); 1493 return -ENOMEM; 1494 } 1495 1496 dep->dwc = dwc; 1497 dep->number = epnum; 1498 dwc->eps[epnum] = dep; 1499 1500 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1501 (epnum & 1) ? "in" : "out"); 1502 dep->endpoint.name = dep->name; 1503 dep->direction = (epnum & 1); 1504 1505 if (epnum == 0 || epnum == 1) { 1506 dep->endpoint.maxpacket = 512; 1507 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1508 if (!epnum) 1509 dwc->gadget.ep0 = &dep->endpoint; 1510 } else { 1511 int ret; 1512 1513 dep->endpoint.maxpacket = 1024; 1514 dep->endpoint.max_streams = 15; 1515 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1516 list_add_tail(&dep->endpoint.ep_list, 1517 &dwc->gadget.ep_list); 1518 1519 ret = dwc3_alloc_trb_pool(dep); 1520 if (ret) 1521 return ret; 1522 } 1523 1524 INIT_LIST_HEAD(&dep->request_list); 1525 INIT_LIST_HEAD(&dep->req_queued); 1526 } 1527 1528 return 0; 1529 } 1530 1531 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1532 { 1533 struct dwc3_ep *dep; 1534 u8 epnum; 1535 1536 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1537 dep = dwc->eps[epnum]; 1538 dwc3_free_trb_pool(dep); 1539 1540 if (epnum != 0 && epnum != 1) 1541 list_del(&dep->endpoint.ep_list); 1542 1543 kfree(dep); 1544 } 1545 } 1546 1547 static void dwc3_gadget_release(struct device *dev) 1548 { 1549 dev_dbg(dev, "%s\n", __func__); 1550 } 1551 1552 /* -------------------------------------------------------------------------- */ 1553 static int 
dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1554 const struct dwc3_event_depevt *event, int status) 1555 { 1556 struct dwc3_request *req; 1557 struct dwc3_trb *trb; 1558 unsigned int count; 1559 unsigned int s_pkt = 0; 1560 1561 do { 1562 req = next_request(&dep->req_queued); 1563 if (!req) { 1564 WARN_ON_ONCE(1); 1565 return 1; 1566 } 1567 1568 trb = req->trb; 1569 1570 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1571 /* 1572 * We continue despite the error. There is not much we 1573 * can do. If we don't clean it up we loop forever. If 1574 * we skip the TRB then it gets overwritten after a 1575 * while since we use them in a ring buffer. A BUG() 1576 * would help. Lets hope that if this occurs, someone 1577 * fixes the root cause instead of looking away :) 1578 */ 1579 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1580 dep->name, req->trb); 1581 count = trb->size & DWC3_TRB_SIZE_MASK; 1582 1583 if (dep->direction) { 1584 if (count) { 1585 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1586 dep->name); 1587 status = -ECONNRESET; 1588 } 1589 } else { 1590 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1591 s_pkt = 1; 1592 } 1593 1594 /* 1595 * We assume here we will always receive the entire data block 1596 * which we should receive. Meaning, if we program RX to 1597 * receive 4K but we receive only 2K, we assume that's all we 1598 * should receive and we simply bounce the request back to the 1599 * gadget driver for further processing. 
1600 */ 1601 req->request.actual += req->request.length - count; 1602 dwc3_gadget_giveback(dep, req, status); 1603 if (s_pkt) 1604 break; 1605 if ((event->status & DEPEVT_STATUS_LST) && 1606 (trb->ctrl & DWC3_TRB_CTRL_LST)) 1607 break; 1608 if ((event->status & DEPEVT_STATUS_IOC) && 1609 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1610 break; 1611 } while (1); 1612 1613 if ((event->status & DEPEVT_STATUS_IOC) && 1614 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1615 return 0; 1616 return 1; 1617 } 1618 1619 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 1620 struct dwc3_ep *dep, const struct dwc3_event_depevt *event, 1621 int start_new) 1622 { 1623 unsigned status = 0; 1624 int clean_busy; 1625 1626 if (event->status & DEPEVT_STATUS_BUSERR) 1627 status = -ECONNRESET; 1628 1629 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 1630 if (clean_busy) 1631 dep->flags &= ~DWC3_EP_BUSY; 1632 1633 /* 1634 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 1635 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 
1636 */ 1637 if (dwc->revision < DWC3_REVISION_183A) { 1638 u32 reg; 1639 int i; 1640 1641 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 1642 struct dwc3_ep *dep = dwc->eps[i]; 1643 1644 if (!(dep->flags & DWC3_EP_ENABLED)) 1645 continue; 1646 1647 if (!list_empty(&dep->req_queued)) 1648 return; 1649 } 1650 1651 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1652 reg |= dwc->u1u2; 1653 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1654 1655 dwc->u1u2 = 0; 1656 } 1657 } 1658 1659 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1660 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1661 { 1662 u32 uf, mask; 1663 1664 if (list_empty(&dep->request_list)) { 1665 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n", 1666 dep->name); 1667 return; 1668 } 1669 1670 mask = ~(dep->interval - 1); 1671 uf = event->parameters & mask; 1672 /* 4 micro frames in the future */ 1673 uf += dep->interval * 4; 1674 1675 __dwc3_gadget_kick_transfer(dep, uf, 1); 1676 } 1677 1678 static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep, 1679 const struct dwc3_event_depevt *event) 1680 { 1681 struct dwc3 *dwc = dep->dwc; 1682 struct dwc3_event_depevt mod_ev = *event; 1683 1684 /* 1685 * We were asked to remove one request. It is possible that this 1686 * request and a few others were started together and have the same 1687 * transfer index. Since we stopped the complete endpoint we don't 1688 * know how many requests were already completed (and not yet) 1689 * reported and how could be done (later). We purge them all until 1690 * the end of the list. 
1691 */ 1692 mod_ev.status = DEPEVT_STATUS_LST; 1693 dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN); 1694 dep->flags &= ~DWC3_EP_BUSY; 1695 /* pending requests are ignored and are queued on XferNotReady */ 1696 } 1697 1698 static void dwc3_ep_cmd_compl(struct dwc3_ep *dep, 1699 const struct dwc3_event_depevt *event) 1700 { 1701 u32 param = event->parameters; 1702 u32 cmd_type = (param >> 8) & ((1 << 5) - 1); 1703 1704 switch (cmd_type) { 1705 case DWC3_DEPCMD_ENDTRANSFER: 1706 dwc3_process_ep_cmd_complete(dep, event); 1707 break; 1708 case DWC3_DEPCMD_STARTTRANSFER: 1709 dep->res_trans_idx = param & 0x7f; 1710 break; 1711 default: 1712 printk(KERN_ERR "%s() unknown /unexpected type: %d\n", 1713 __func__, cmd_type); 1714 break; 1715 }; 1716 } 1717 1718 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 1719 const struct dwc3_event_depevt *event) 1720 { 1721 struct dwc3_ep *dep; 1722 u8 epnum = event->endpoint_number; 1723 1724 dep = dwc->eps[epnum]; 1725 1726 dev_vdbg(dwc->dev, "%s: %s\n", dep->name, 1727 dwc3_ep_event_string(event->endpoint_event)); 1728 1729 if (epnum == 0 || epnum == 1) { 1730 dwc3_ep0_interrupt(dwc, event); 1731 return; 1732 } 1733 1734 switch (event->endpoint_event) { 1735 case DWC3_DEPEVT_XFERCOMPLETE: 1736 dep->res_trans_idx = 0; 1737 1738 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1739 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", 1740 dep->name); 1741 return; 1742 } 1743 1744 dwc3_endpoint_transfer_complete(dwc, dep, event, 1); 1745 break; 1746 case DWC3_DEPEVT_XFERINPROGRESS: 1747 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1748 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n", 1749 dep->name); 1750 return; 1751 } 1752 1753 dwc3_endpoint_transfer_complete(dwc, dep, event, 0); 1754 break; 1755 case DWC3_DEPEVT_XFERNOTREADY: 1756 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1757 dwc3_gadget_start_isoc(dwc, dep, event); 1758 } else { 1759 int ret; 1760 1761 dev_vdbg(dwc->dev, "%s: reason %s\n", 1762 
dep->name, event->status & 1763 DEPEVT_STATUS_TRANSFER_ACTIVE 1764 ? "Transfer Active" 1765 : "Transfer Not Active"); 1766 1767 ret = __dwc3_gadget_kick_transfer(dep, 0, 1); 1768 if (!ret || ret == -EBUSY) 1769 return; 1770 1771 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1772 dep->name); 1773 } 1774 1775 break; 1776 case DWC3_DEPEVT_STREAMEVT: 1777 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 1778 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 1779 dep->name); 1780 return; 1781 } 1782 1783 switch (event->status) { 1784 case DEPEVT_STREAMEVT_FOUND: 1785 dev_vdbg(dwc->dev, "Stream %d found and started\n", 1786 event->parameters); 1787 1788 break; 1789 case DEPEVT_STREAMEVT_NOTFOUND: 1790 /* FALLTHROUGH */ 1791 default: 1792 dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); 1793 } 1794 break; 1795 case DWC3_DEPEVT_RXTXFIFOEVT: 1796 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); 1797 break; 1798 case DWC3_DEPEVT_EPCMDCMPLT: 1799 dwc3_ep_cmd_compl(dep, event); 1800 break; 1801 } 1802 } 1803 1804 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 1805 { 1806 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 1807 spin_unlock(&dwc->lock); 1808 dwc->gadget_driver->disconnect(&dwc->gadget); 1809 spin_lock(&dwc->lock); 1810 } 1811 } 1812 1813 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum) 1814 { 1815 struct dwc3_ep *dep; 1816 struct dwc3_gadget_ep_cmd_params params; 1817 u32 cmd; 1818 int ret; 1819 1820 dep = dwc->eps[epnum]; 1821 1822 WARN_ON(!dep->res_trans_idx); 1823 if (dep->res_trans_idx) { 1824 cmd = DWC3_DEPCMD_ENDTRANSFER; 1825 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC; 1826 cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx); 1827 memset(¶ms, 0, sizeof(params)); 1828 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 1829 WARN_ON_ONCE(ret); 1830 dep->res_trans_idx = 0; 1831 } 1832 } 1833 1834 static void dwc3_stop_active_transfers(struct dwc3 *dwc) 1835 { 1836 u32 epnum; 1837 1838 for (epnum = 
2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1839 struct dwc3_ep *dep; 1840 1841 dep = dwc->eps[epnum]; 1842 if (!(dep->flags & DWC3_EP_ENABLED)) 1843 continue; 1844 1845 dwc3_remove_requests(dwc, dep); 1846 } 1847 } 1848 1849 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 1850 { 1851 u32 epnum; 1852 1853 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1854 struct dwc3_ep *dep; 1855 struct dwc3_gadget_ep_cmd_params params; 1856 int ret; 1857 1858 dep = dwc->eps[epnum]; 1859 1860 if (!(dep->flags & DWC3_EP_STALL)) 1861 continue; 1862 1863 dep->flags &= ~DWC3_EP_STALL; 1864 1865 memset(¶ms, 0, sizeof(params)); 1866 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1867 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1868 WARN_ON_ONCE(ret); 1869 } 1870 } 1871 1872 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 1873 { 1874 dev_vdbg(dwc->dev, "%s\n", __func__); 1875 #if 0 1876 XXX 1877 U1/U2 is powersave optimization. Skip it for now. Anyway we need to 1878 enable it before we can disable it. 
1879 1880 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1881 reg &= ~DWC3_DCTL_INITU1ENA; 1882 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1883 1884 reg &= ~DWC3_DCTL_INITU2ENA; 1885 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1886 #endif 1887 1888 dwc3_stop_active_transfers(dwc); 1889 dwc3_disconnect_gadget(dwc); 1890 dwc->start_config_issued = false; 1891 1892 dwc->gadget.speed = USB_SPEED_UNKNOWN; 1893 dwc->setup_packet_pending = false; 1894 } 1895 1896 static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on) 1897 { 1898 u32 reg; 1899 1900 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); 1901 1902 if (on) 1903 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; 1904 else 1905 reg |= DWC3_GUSB3PIPECTL_SUSPHY; 1906 1907 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); 1908 } 1909 1910 static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on) 1911 { 1912 u32 reg; 1913 1914 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 1915 1916 if (on) 1917 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 1918 else 1919 reg |= DWC3_GUSB2PHYCFG_SUSPHY; 1920 1921 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 1922 } 1923 1924 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 1925 { 1926 u32 reg; 1927 1928 dev_vdbg(dwc->dev, "%s\n", __func__); 1929 1930 /* 1931 * WORKAROUND: DWC3 revisions <1.88a have an issue which 1932 * would cause a missing Disconnect Event if there's a 1933 * pending Setup Packet in the FIFO. 1934 * 1935 * There's no suggested workaround on the official Bug 1936 * report, which states that "unless the driver/application 1937 * is doing any special handling of a disconnect event, 1938 * there is no functional issue". 1939 * 1940 * Unfortunately, it turns out that we _do_ some special 1941 * handling of a disconnect event, namely complete all 1942 * pending transfers, notify gadget driver of the 1943 * disconnection, and so on. 1944 * 1945 * Our suggested workaround is to follow the Disconnect 1946 * Event steps here, instead, based on a setup_packet_pending 1947 * flag. 
Such flag gets set whenever we have a XferNotReady 1948 * event on EP0 and gets cleared on XferComplete for the 1949 * same endpoint. 1950 * 1951 * Refers to: 1952 * 1953 * STAR#9000466709: RTL: Device : Disconnect event not 1954 * generated if setup packet pending in FIFO 1955 */ 1956 if (dwc->revision < DWC3_REVISION_188A) { 1957 if (dwc->setup_packet_pending) 1958 dwc3_gadget_disconnect_interrupt(dwc); 1959 } 1960 1961 /* after reset -> Default State */ 1962 dwc->dev_state = DWC3_DEFAULT_STATE; 1963 1964 /* Enable PHYs */ 1965 dwc3_gadget_usb2_phy_power(dwc, true); 1966 dwc3_gadget_usb3_phy_power(dwc, true); 1967 1968 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) 1969 dwc3_disconnect_gadget(dwc); 1970 1971 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1972 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 1973 reg &= ~(DWC3_DCTL_INITU1ENA | DWC3_DCTL_INITU2ENA); 1974 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1975 dwc->test_mode = false; 1976 1977 dwc3_stop_active_transfers(dwc); 1978 dwc3_clear_stall_all_ep(dwc); 1979 dwc->start_config_issued = false; 1980 1981 /* Reset device address to zero */ 1982 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1983 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 1984 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1985 } 1986 1987 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) 1988 { 1989 u32 reg; 1990 u32 usb30_clock = DWC3_GCTL_CLK_BUS; 1991 1992 /* 1993 * We change the clock only at SS but I dunno why I would want to do 1994 * this. Maybe it becomes part of the power saving plan. 1995 */ 1996 1997 if (speed != DWC3_DSTS_SUPERSPEED) 1998 return; 1999 2000 /* 2001 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 2002 * each time on Connect Done. 
2003 */ 2004 if (!usb30_clock) 2005 return; 2006 2007 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 2008 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); 2009 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 2010 } 2011 2012 static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed) 2013 { 2014 switch (speed) { 2015 case USB_SPEED_SUPER: 2016 dwc3_gadget_usb2_phy_power(dwc, false); 2017 break; 2018 case USB_SPEED_HIGH: 2019 case USB_SPEED_FULL: 2020 case USB_SPEED_LOW: 2021 dwc3_gadget_usb3_phy_power(dwc, false); 2022 break; 2023 } 2024 } 2025 2026 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2027 { 2028 struct dwc3_gadget_ep_cmd_params params; 2029 struct dwc3_ep *dep; 2030 int ret; 2031 u32 reg; 2032 u8 speed; 2033 2034 dev_vdbg(dwc->dev, "%s\n", __func__); 2035 2036 memset(¶ms, 0x00, sizeof(params)); 2037 2038 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2039 speed = reg & DWC3_DSTS_CONNECTSPD; 2040 dwc->speed = speed; 2041 2042 dwc3_update_ram_clk_sel(dwc, speed); 2043 2044 switch (speed) { 2045 case DWC3_DCFG_SUPERSPEED: 2046 /* 2047 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2048 * would cause a missing USB3 Reset event. 2049 * 2050 * In such situations, we should force a USB3 Reset 2051 * event by calling our dwc3_gadget_reset_interrupt() 2052 * routine. 
2053 * 2054 * Refers to: 2055 * 2056 * STAR#9000483510: RTL: SS : USB3 reset event may 2057 * not be generated always when the link enters poll 2058 */ 2059 if (dwc->revision < DWC3_REVISION_190A) 2060 dwc3_gadget_reset_interrupt(dwc); 2061 2062 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2063 dwc->gadget.ep0->maxpacket = 512; 2064 dwc->gadget.speed = USB_SPEED_SUPER; 2065 break; 2066 case DWC3_DCFG_HIGHSPEED: 2067 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2068 dwc->gadget.ep0->maxpacket = 64; 2069 dwc->gadget.speed = USB_SPEED_HIGH; 2070 break; 2071 case DWC3_DCFG_FULLSPEED2: 2072 case DWC3_DCFG_FULLSPEED1: 2073 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2074 dwc->gadget.ep0->maxpacket = 64; 2075 dwc->gadget.speed = USB_SPEED_FULL; 2076 break; 2077 case DWC3_DCFG_LOWSPEED: 2078 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2079 dwc->gadget.ep0->maxpacket = 8; 2080 dwc->gadget.speed = USB_SPEED_LOW; 2081 break; 2082 } 2083 2084 /* Disable unneded PHY */ 2085 dwc3_gadget_disable_phy(dwc, dwc->gadget.speed); 2086 2087 dep = dwc->eps[0]; 2088 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); 2089 if (ret) { 2090 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2091 return; 2092 } 2093 2094 dep = dwc->eps[1]; 2095 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); 2096 if (ret) { 2097 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2098 return; 2099 } 2100 2101 /* 2102 * Configure PHY via GUSB3PIPECTLn if required. 2103 * 2104 * Update GTXFIFOSIZn 2105 * 2106 * In both cases reset values should be sufficient. 2107 */ 2108 } 2109 2110 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2111 { 2112 dev_vdbg(dwc->dev, "%s\n", __func__); 2113 2114 /* 2115 * TODO take core out of low power mode when that's 2116 * implemented. 
2117 */ 2118 2119 dwc->gadget_driver->resume(&dwc->gadget); 2120 } 2121 2122 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2123 unsigned int evtinfo) 2124 { 2125 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2126 2127 /* 2128 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending 2129 * on the link partner, the USB session might do multiple entry/exit 2130 * of low power states before a transfer takes place. 2131 * 2132 * Due to this problem, we might experience lower throughput. The 2133 * suggested workaround is to disable DCTL[12:9] bits if we're 2134 * transitioning from U1/U2 to U0 and enable those bits again 2135 * after a transfer completes and there are no pending transfers 2136 * on any of the enabled endpoints. 2137 * 2138 * This is the first half of that workaround. 2139 * 2140 * Refers to: 2141 * 2142 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us 2143 * core send LGO_Ux entering U0 2144 */ 2145 if (dwc->revision < DWC3_REVISION_183A) { 2146 if (next == DWC3_LINK_STATE_U0) { 2147 u32 u1u2; 2148 u32 reg; 2149 2150 switch (dwc->link_state) { 2151 case DWC3_LINK_STATE_U1: 2152 case DWC3_LINK_STATE_U2: 2153 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2154 u1u2 = reg & (DWC3_DCTL_INITU2ENA 2155 | DWC3_DCTL_ACCEPTU2ENA 2156 | DWC3_DCTL_INITU1ENA 2157 | DWC3_DCTL_ACCEPTU1ENA); 2158 2159 if (!dwc->u1u2) 2160 dwc->u1u2 = reg & u1u2; 2161 2162 reg &= ~u1u2; 2163 2164 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2165 break; 2166 default: 2167 /* do nothing */ 2168 break; 2169 } 2170 } 2171 } 2172 2173 dwc->link_state = next; 2174 2175 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state); 2176 } 2177 2178 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 2179 const struct dwc3_event_devt *event) 2180 { 2181 switch (event->type) { 2182 case DWC3_DEVICE_EVENT_DISCONNECT: 2183 dwc3_gadget_disconnect_interrupt(dwc); 2184 break; 2185 case DWC3_DEVICE_EVENT_RESET: 2186 dwc3_gadget_reset_interrupt(dwc); 2187 
break; 2188 case DWC3_DEVICE_EVENT_CONNECT_DONE: 2189 dwc3_gadget_conndone_interrupt(dwc); 2190 break; 2191 case DWC3_DEVICE_EVENT_WAKEUP: 2192 dwc3_gadget_wakeup_interrupt(dwc); 2193 break; 2194 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 2195 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 2196 break; 2197 case DWC3_DEVICE_EVENT_EOPF: 2198 dev_vdbg(dwc->dev, "End of Periodic Frame\n"); 2199 break; 2200 case DWC3_DEVICE_EVENT_SOF: 2201 dev_vdbg(dwc->dev, "Start of Periodic Frame\n"); 2202 break; 2203 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 2204 dev_vdbg(dwc->dev, "Erratic Error\n"); 2205 break; 2206 case DWC3_DEVICE_EVENT_CMD_CMPL: 2207 dev_vdbg(dwc->dev, "Command Complete\n"); 2208 break; 2209 case DWC3_DEVICE_EVENT_OVERFLOW: 2210 dev_vdbg(dwc->dev, "Overflow\n"); 2211 break; 2212 default: 2213 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 2214 } 2215 } 2216 2217 static void dwc3_process_event_entry(struct dwc3 *dwc, 2218 const union dwc3_event *event) 2219 { 2220 /* Endpoint IRQ, handle it and return early */ 2221 if (event->type.is_devspec == 0) { 2222 /* depevt */ 2223 return dwc3_endpoint_interrupt(dwc, &event->depevt); 2224 } 2225 2226 switch (event->type.type) { 2227 case DWC3_EVENT_TYPE_DEV: 2228 dwc3_gadget_interrupt(dwc, &event->devt); 2229 break; 2230 /* REVISIT what to do with Carkit and I2C events ? 
*/ 2231 default: 2232 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 2233 } 2234 } 2235 2236 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) 2237 { 2238 struct dwc3_event_buffer *evt; 2239 int left; 2240 u32 count; 2241 2242 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)); 2243 count &= DWC3_GEVNTCOUNT_MASK; 2244 if (!count) 2245 return IRQ_NONE; 2246 2247 evt = dwc->ev_buffs[buf]; 2248 left = count; 2249 2250 while (left > 0) { 2251 union dwc3_event event; 2252 2253 event.raw = *(u32 *) (evt->buf + evt->lpos); 2254 2255 dwc3_process_event_entry(dwc, &event); 2256 /* 2257 * XXX we wrap around correctly to the next entry as almost all 2258 * entries are 4 bytes in size. There is one entry which has 12 2259 * bytes which is a regular entry followed by 8 bytes data. ATM 2260 * I don't know how things are organized if were get next to the 2261 * a boundary so I worry about that once we try to handle that. 2262 */ 2263 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; 2264 left -= 4; 2265 2266 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); 2267 } 2268 2269 return IRQ_HANDLED; 2270 } 2271 2272 static irqreturn_t dwc3_interrupt(int irq, void *_dwc) 2273 { 2274 struct dwc3 *dwc = _dwc; 2275 int i; 2276 irqreturn_t ret = IRQ_NONE; 2277 2278 spin_lock(&dwc->lock); 2279 2280 for (i = 0; i < dwc->num_event_buffers; i++) { 2281 irqreturn_t status; 2282 2283 status = dwc3_process_event_buf(dwc, i); 2284 if (status == IRQ_HANDLED) 2285 ret = status; 2286 } 2287 2288 spin_unlock(&dwc->lock); 2289 2290 return ret; 2291 } 2292 2293 /** 2294 * dwc3_gadget_init - Initializes gadget related registers 2295 * @dwc: pointer to our controller context structure 2296 * 2297 * Returns 0 on success otherwise negative errno. 
2298 */ 2299 int __devinit dwc3_gadget_init(struct dwc3 *dwc) 2300 { 2301 u32 reg; 2302 int ret; 2303 int irq; 2304 2305 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2306 &dwc->ctrl_req_addr, GFP_KERNEL); 2307 if (!dwc->ctrl_req) { 2308 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2309 ret = -ENOMEM; 2310 goto err0; 2311 } 2312 2313 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2314 &dwc->ep0_trb_addr, GFP_KERNEL); 2315 if (!dwc->ep0_trb) { 2316 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2317 ret = -ENOMEM; 2318 goto err1; 2319 } 2320 2321 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL); 2322 if (!dwc->setup_buf) { 2323 dev_err(dwc->dev, "failed to allocate setup buffer\n"); 2324 ret = -ENOMEM; 2325 goto err2; 2326 } 2327 2328 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, 2329 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, 2330 GFP_KERNEL); 2331 if (!dwc->ep0_bounce) { 2332 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 2333 ret = -ENOMEM; 2334 goto err3; 2335 } 2336 2337 dev_set_name(&dwc->gadget.dev, "gadget"); 2338 2339 dwc->gadget.ops = &dwc3_gadget_ops; 2340 dwc->gadget.max_speed = USB_SPEED_SUPER; 2341 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2342 dwc->gadget.dev.parent = dwc->dev; 2343 dwc->gadget.sg_supported = true; 2344 2345 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask); 2346 2347 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms; 2348 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask; 2349 dwc->gadget.dev.release = dwc3_gadget_release; 2350 dwc->gadget.name = "dwc3-gadget"; 2351 2352 /* 2353 * REVISIT: Here we should clear all pending IRQs to be 2354 * sure we're starting from a well known location. 
2355 */ 2356 2357 ret = dwc3_gadget_init_endpoints(dwc); 2358 if (ret) 2359 goto err4; 2360 2361 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 2362 2363 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED, 2364 "dwc3", dwc); 2365 if (ret) { 2366 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 2367 irq, ret); 2368 goto err5; 2369 } 2370 2371 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2372 reg |= DWC3_DCFG_LPM_CAP; 2373 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2374 2375 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2376 reg |= DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA; 2377 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2378 2379 /* Enable all but Start and End of Frame IRQs */ 2380 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 2381 DWC3_DEVTEN_EVNTOVERFLOWEN | 2382 DWC3_DEVTEN_CMDCMPLTEN | 2383 DWC3_DEVTEN_ERRTICERREN | 2384 DWC3_DEVTEN_WKUPEVTEN | 2385 DWC3_DEVTEN_ULSTCNGEN | 2386 DWC3_DEVTEN_CONNECTDONEEN | 2387 DWC3_DEVTEN_USBRSTEN | 2388 DWC3_DEVTEN_DISCONNEVTEN); 2389 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 2390 2391 ret = device_register(&dwc->gadget.dev); 2392 if (ret) { 2393 dev_err(dwc->dev, "failed to register gadget device\n"); 2394 put_device(&dwc->gadget.dev); 2395 goto err6; 2396 } 2397 2398 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2399 if (ret) { 2400 dev_err(dwc->dev, "failed to register udc\n"); 2401 goto err7; 2402 } 2403 2404 return 0; 2405 2406 err7: 2407 device_unregister(&dwc->gadget.dev); 2408 2409 err6: 2410 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 2411 free_irq(irq, dwc); 2412 2413 err5: 2414 dwc3_gadget_free_endpoints(dwc); 2415 2416 err4: 2417 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2418 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2419 2420 err3: 2421 kfree(dwc->setup_buf); 2422 2423 err2: 2424 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2425 dwc->ep0_trb, dwc->ep0_trb_addr); 2426 2427 err1: 2428 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2429 dwc->ctrl_req, dwc->ctrl_req_addr); 2430 2431 err0: 2432 return 
ret; 2433 } 2434 2435 void dwc3_gadget_exit(struct dwc3 *dwc) 2436 { 2437 int irq; 2438 2439 usb_del_gadget_udc(&dwc->gadget); 2440 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 2441 2442 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 2443 free_irq(irq, dwc); 2444 2445 dwc3_gadget_free_endpoints(dwc); 2446 2447 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2448 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2449 2450 kfree(dwc->setup_buf); 2451 2452 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2453 dwc->ep0_trb, dwc->ep0_trb_addr); 2454 2455 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2456 dwc->ctrl_req, dwc->ctrl_req_addr); 2457 2458 device_unregister(&dwc->gadget.dev); 2459 } 2460