// SPDX-License-Identifier: GPL-2.0
/*
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

#define DWC3_ALIGN_FRAME(d, n)	(((d)->frame_number + ((d)->interval * (n))) \
					& ~((d)->interval - 1))

/**
 * dwc3_gadget_set_test_mode - enables usb2 test modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will return 0 on
 * success or -EINVAL if wrong Test Selector is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_gadget_dctl_write_safe(dwc, reg);

	return 0;
}

/**
 * dwc3_gadget_get_link_state - gets current state of usb link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the link state on success (>= 0) or -ETIMEDOUT.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - sets usb link to a particular state
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
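	 * DSTS.DCNRD, polled below, stays set for as long as the controller
	 * is not yet ready to accept the link state change request.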
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set no action before sending new link state change */
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	return -ETIMEDOUT;
}

/**
 * dwc3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning. The
 * link TRB is always at the last TRB entry.
 */
static void dwc3_ep_inc_trb(u8 *index)
{
	(*index)++;
	if (*index == (DWC3_TRB_NUM - 1))
		*index = 0;
}

/**
 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
 * @dep: The endpoint whose enqueue pointer we're incrementing
 */
static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}

/**
 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
 * @dep: The endpoint whose dequeue pointer we're incrementing
 */
static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}

static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
		struct dwc3_request *req, int status)
{
	struct dwc3 *dwc = dep->dwc;

	list_del(&req->list);
	req->remaining = 0;
	req->needs_extra_trb = false;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (req->trb)
		usb_gadget_unmap_request_by_dev(dwc->sysdev,
				&req->request, req->direction);

	req->trb = NULL;
	trace_dwc3_gadget_giveback(req);

	if (dep->number > 1)
		pm_runtime_put(dwc->dev);
}

/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to which the request belongs
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
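 *
 * Note: the controller lock is released around the ->complete() callback so
 * that gadget drivers may queue new requests from their completion handlers
 * without deadlocking on the controller lock.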
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	dwc3_gadget_del_and_unmap_request(dep, req, status);
	req->status = DWC3_REQUEST_STATUS_COMPLETED;

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

/**
 * dwc3_send_gadget_generic_command - issue a generic command for the controller
 * @dwc: pointer to the controller context
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Caller should take care of locking. Issue @cmd with a given @param to @dwc
 * and wait for its completion.
 */
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32 timeout = 500;
	int status = 0;
	int ret = 0;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			status = DWC3_DGCMD_STATUS(reg);
			if (status)
				ret = -EINVAL;
			break;
		}
	} while (--timeout);

	if (!timeout) {
		ret = -ETIMEDOUT;
		status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_generic_cmd(cmd, param, status);

	return ret;
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc);

/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. This function will issue @cmd with given
 * @params to @dep and wait for its completion.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
		struct dwc3_gadget_ep_cmd_params *params)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;
	u32 timeout = 1000;
	u32 saved_config = 0;
	u32 reg;

	int cmd_status = 0;
	int ret = -EINVAL;

	/*
	 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
	 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
	 * endpoint command.
	 *
	 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
	 * settings. Restore them after the command is completed.
	 *
	 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
	 */
	if (dwc->gadget.speed <= USB_SPEED_HIGH) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
			saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
		}

		if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
			saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
		}

		if (saved_config)
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		int needs_wakeup;

		needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
				dwc->link_state == DWC3_LINK_STATE_U2 ||
				dwc->link_state == DWC3_LINK_STATE_U3);

		if (unlikely(needs_wakeup)) {
			ret = __dwc3_gadget_wakeup(dwc);
			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
					ret);
		}
	}

	dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);

	/*
	 * Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
	 * not relying on XferNotReady, we can make use of a special "No
	 * Response Update Transfer" command where we should clear both CmdAct
	 * and CmdIOC bits.
	 *
	 * With this, we don't need to wait for command completion and can
	 * straight away issue further commands to the endpoint.
	 *
	 * NOTICE: We're making an assumption that control endpoints will never
	 * make use of Update Transfer command. This is a safe assumption
	 * because we can never have more than one request at a time with
	 * Control Endpoints. If anybody changes that assumption, this chunk
	 * needs to be updated accordingly.
	 */
	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
	    !usb_endpoint_xfer_isoc(desc))
		cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
	else
		cmd |= DWC3_DEPCMD_CMDACT;

	dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
	do {
		reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			cmd_status = DWC3_DEPCMD_STATUS(reg);

			switch (cmd_status) {
			case 0:
				ret = 0;
				break;
			case DEPEVT_TRANSFER_NO_RESOURCE:
				ret = -EINVAL;
				break;
			case DEPEVT_TRANSFER_BUS_EXPIRY:
				/*
				 * SW issues START TRANSFER command to
				 * isochronous ep with future frame interval. If
				 * future interval time has already passed when
				 * core receives the command, it will respond
				 * with an error status of 'Bus Expiry'.
				 *
				 * Instead of always returning -EINVAL, let's
				 * give a hint to the gadget driver that this is
				 * the case by returning -EAGAIN.
				 */
				ret = -EAGAIN;
				break;
			default:
				dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
			}

			break;
		}
	} while (--timeout);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		cmd_status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);

	if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		dep->flags |= DWC3_EP_TRANSFER_STARTED;
		dwc3_gadget_ep_get_transfer_index(dep);
	}

	if (saved_config) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		reg |= saved_config;
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	return ret;
}

static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd = DWC3_DEPCMD_CLEARSTALL;

	/*
	 * As of core revision 2.60a the recommended programming model
	 * is to set the ClearPendIN bit when issuing a Clear Stall EP
	 * command for IN endpoints. This is to prevent an issue where
	 * some (non-compliant) hosts may not send ACK TPs for pending
	 * IN transfers due to a mishandled error condition. Synopsys
	 * STAR 9000614252.
	 */
	if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
	    (dwc->gadget.speed >= USB_SPEED_SUPER))
		cmd |= DWC3_DEPCMD_CLEARPENDIN;

	memset(&params, 0, sizeof(params));

	return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
			&params);
}

/**
 * dwc3_gadget_start_config - configure ep resources
 * @dep: endpoint that is being enabled
 *
 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
 * completion, it will set Transfer Resource for all available endpoints.
 *
 * The assignment of transfer resources cannot perfectly follow the data book
 * due to the fact that the controller driver does not have all knowledge of the
 * configuration in advance. It is given this information piecemeal by the
 * composite gadget framework after every SET_CONFIGURATION and
 * SET_INTERFACE. Trying to follow the databook programming model in this
 * scenario can cause errors. For two reasons:
 *
 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
 * incorrect in the scenario of multiple interfaces.
 *
 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
 * endpoint on alt setting (8.1.6).
 *
 * The following simplified method is used instead:
 *
 * All hardware endpoints can be assigned a transfer resource and this setting
 * will stay persistent until either a core reset or hibernation. So whenever we
 * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
 * guaranteed that there are as many transfer resources as endpoints.
 *
 * This function is called for each endpoint when it is being enabled but is
 * triggered only when called for EP0-out, which always happens first, and which
 * should only happen in one of the above conditions.
 */
static int dwc3_gadget_start_config(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc;
	u32 cmd;
	int i;
	int ret;

	if (dep->number)
		return 0;

	memset(&params, 0x00, sizeof(params));
	cmd = DWC3_DEPCMD_DEPSTARTCFG;
	dwc = dep->dwc;

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret)
		return ret;

	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *dep = dwc->eps[i];

		if (!dep)
			continue;

		ret = dwc3_gadget_set_xfer_resource(dep);
		if (ret)
			return ret;
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	const struct usb_endpoint_descriptor *desc;
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;

	comp_desc = dep->endpoint.comp_desc;
	desc = dep->endpoint.desc;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed >= USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst;
		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
	}

	params.param0 |= action;
	if (action == DWC3_DEPCFG_ACTION_RESTORE)
		params.param2 |= dep->saved_state;

	if (usb_endpoint_xfer_control(desc))
		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;

	if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
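	 *
	 * That is, physical endpoint number = (USB endpoint number << 1) |
	 * direction, so e.g. EP2 OUT is physical endpoint 4 and EP2 IN is
	 * physical endpoint 5.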
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}

/**
 * __dwc3_gadget_ep_enable - initializes a hw endpoint
 * @dep: endpoint to be initialized
 * @action: one of INIT, MODIFY or RESTORE
 *
 * Caller should take care of locking. Execute all necessary commands to
 * initialize a HW endpoint so it can be used by a gadget driver.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;

	u32 reg;
	int ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dep, action);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb *trb_st_hw;
		struct dwc3_trb *trb_link;

		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (usb_endpoint_xfer_control(desc))
			goto out;

		/* Initialize the TRB ring */
		dep->trb_dequeue = 0;
		dep->trb_enqueue = 0;
		memset(dep->trb_pool, 0,
		       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);

		/* Link TRB. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	/*
	 * Issue StartTransfer here with no-op TRB so we can always rely on No
	 * Response Update Transfer command.
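	 *
	 * With the transfer already started, later requests on these bulk and
	 * interrupt endpoints are kicked with the Update Transfer command
	 * alone instead of a new Start Transfer.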
	 */
	if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
	    usb_endpoint_xfer_int(desc)) {
		struct dwc3_gadget_ep_cmd_params params;
		struct dwc3_trb *trb;
		dma_addr_t trb_dma;
		u32 cmd;

		memset(&params, 0, sizeof(params));
		trb = &dep->trb_pool[0];
		trb_dma = dwc3_trb_dma_offset(dep, trb);

		params.param0 = upper_32_bits(trb_dma);
		params.param1 = lower_32_bits(trb_dma);

		cmd = DWC3_DEPCMD_STARTTRANSFER;

		ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
		if (ret < 0)
			return ret;
	}

out:
	trace_dwc3_gadget_ep_enable(dep);

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
		bool interrupt);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	dwc3_stop_active_transfer(dep, true, false);

	/* - giveback all requests to gadget driver */
	while (!list_empty(&dep->started_list)) {
		req = next_request(&dep->started_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}

	while (!list_empty(&dep->pending_list)) {
		req = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}

	while (!list_empty(&dep->cancelled_list)) {
		req = next_request(&dep->cancelled_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - disables a hw endpoint
 * @dep: the endpoint to disable
 *
 * This function undoes what __dwc3_gadget_ep_enable did and also removes
 * requests which are currently being processed by the hardware and those which
 * are not yet scheduled.
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	trace_dwc3_gadget_ep_disable(dep);

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->type = 0;
	dep->flags = 0;

	/* Clear out the ep descriptors for non-ep0 */
	if (dep->number > 1) {
		dep->endpoint.comp_desc = NULL;
		dep->endpoint.desc = NULL;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
					"%s is already enabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
					"%s is already disabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->direction = dep->direction;
	req->epnum = dep->number;
	req->dep = dep;
	req->status = DWC3_REQUEST_STATUS_UNKNOWN;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	trace_dwc3_free_request(req);
	kfree(req);
}

/**
 * dwc3_ep_prev_trb - returns the previous TRB in the ring
 * @dep: The endpoint with the TRB ring
 * @index: The index of the current TRB in the ring
 *
 * Returns the TRB prior to the one pointed to by the index. If the
 * index is 0, we will wrap backwards, skip the link TRB, and return
 * the one just before that.
 */
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
	u8 tmp = index;

	if (!tmp)
		tmp = DWC3_TRB_NUM - 1;

	return &dep->trb_pool[tmp - 1];
}

static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
	struct dwc3_trb *tmp;
	u8 trbs_left;

	/*
	 * If enqueue & dequeue are equal then it is either full or empty.
	 *
	 * One way to know for sure is if the TRB right before us has HWO bit
	 * set or not. If it has, then we're definitely full and can't fit any
	 * more transfers in our ring.
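	 *
	 * As an illustration of the arithmetic below (with DWC3_TRB_NUM of
	 * 256): enqueue == 250 and dequeue == 3 gives (3 - 250) & 255 == 9,
	 * minus one for the link TRB slot counted by the wrapped distance,
	 * i.e. 8 TRBs left.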
	 */
	if (dep->trb_enqueue == dep->trb_dequeue) {
		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
			return 0;

		return DWC3_TRB_NUM - 1;
	}

	trbs_left = dep->trb_dequeue - dep->trb_enqueue;
	trbs_left &= (DWC3_TRB_NUM - 1);

	if (dep->trb_dequeue < dep->trb_enqueue)
		trbs_left--;

	return trbs_left;
}

static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
		dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
		unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
{
	struct dwc3 *dwc = dep->dwc;
	struct usb_gadget *gadget = &dwc->gadget;
	enum usb_device_speed speed = gadget->speed;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node) {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

			/*
			 * USB Specification 2.0 Section 5.9.2 states that: "If
			 * there is only a single transaction in the microframe,
			 * only a DATA0 data packet PID is used. If there are
			 * two transactions per microframe, DATA1 is used for
			 * the first transaction data packet and DATA0 is used
			 * for the second transaction data packet. If there are
			 * three transactions per microframe, DATA2 is used for
			 * the first transaction data packet, DATA1 is used for
			 * the second, and DATA0 is used for the third."
			 *
			 * IOW, we should satisfy the following cases:
			 *
			 * 1) length <= maxpacket
			 *	- DATA0
			 *
			 * 2) maxpacket < length <= (2 * maxpacket)
			 *	- DATA1, DATA0
			 *
			 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
			 *	- DATA2, DATA1, DATA0
			 */
			if (speed == USB_SPEED_HIGH) {
				struct usb_ep *ep = &dep->endpoint;
				unsigned int mult = 2;
				unsigned int maxp = usb_endpoint_maxp(ep->desc);

				if (length <= (2 * maxp))
					mult--;

				if (length <= maxp)
					mult--;

				trb->size |= DWC3_TRB_SIZE_PCM1(mult);
			}
		} else {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		}

		/* always enable Interrupt on Missed ISOC */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
				usb_endpoint_type(dep->endpoint.desc));
	}

	/*
	 * Enable Continue on Short Packet
	 * when the endpoint is not stream capable
	 */
	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
		if (!dep->stream_capable)
			trb->ctrl |= DWC3_TRB_CTRL_CSP;

		if (short_not_ok)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
	}

	if ((!no_interrupt && !chain) ||
			(dwc3_calc_trbs_left(dep) == 1))
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	dwc3_ep_inc_enq(dep);

	trace_dwc3_prepare_trb(dep, trb);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @chain: should this TRB be chained to the next?
 * @node: only for isochronous endpoints. First TRB needs different type.
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned chain, unsigned node)
{
	struct dwc3_trb *trb;
	unsigned int length;
	dma_addr_t dma;
	unsigned stream_id = req->request.stream_id;
	unsigned short_not_ok = req->request.short_not_ok;
	unsigned no_interrupt = req->request.no_interrupt;

	if (req->request.num_sgs > 0) {
		length = sg_dma_len(req->start_sg);
		dma = sg_dma_address(req->start_sg);
	} else {
		length = req->request.length;
		dma = req->request.dma;
	}

	trb = &dep->trb_pool[dep->trb_enqueue];

	if (!req->trb) {
		dwc3_gadget_move_started_request(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	req->num_trbs++;

	__dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
			stream_id, short_not_ok, no_interrupt);
}

static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->start_sg;
	struct scatterlist *s;
	int i;

	unsigned int remaining = req->request.num_mapped_sgs
		- req->num_queued_sgs;

	for_each_sg(sg, s, remaining, i) {
		unsigned int length = req->request.length;
		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
		unsigned int rem = length % maxp;
		unsigned chain = true;

		if (sg_is_last(s))
			chain = false;

		if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
			struct dwc3 *dwc = dep->dwc;
			struct dwc3_trb *trb;

			req->needs_extra_trb = true;

			/* prepare normal TRB */
			dwc3_prepare_one_trb(dep, req, true, i);

			/* Now prepare one extra TRB to align transfer size */
			trb = &dep->trb_pool[dep->trb_enqueue];
			req->num_trbs++;
			__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
					maxp - rem, false, 1,
					req->request.stream_id,
					req->request.short_not_ok,
					req->request.no_interrupt);
		} else {
			dwc3_prepare_one_trb(dep, req, chain, i);
		}

		/*
		 * There can be a situation where all sgs in sglist are not
		 * queued because of insufficient trb number. To handle this
		 * case, update start_sg to the next sg to be queued, so that
		 * once TRBs free up we can continue queuing from where we
		 * previously stopped.
		 */
		if (chain)
			req->start_sg = sg_next(s);

		req->num_queued_sgs++;

		if (!dwc3_calc_trbs_left(dep))
			break;
	}
}

static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	unsigned int length = req->request.length;
	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
	unsigned int rem = length % maxp;

	if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
		struct dwc3 *dwc = dep->dwc;
		struct dwc3_trb *trb;

		req->needs_extra_trb = true;

		/* prepare normal TRB */
		dwc3_prepare_one_trb(dep, req, true, 0);

		/* Now prepare one extra TRB to align transfer size */
		trb = &dep->trb_pool[dep->trb_enqueue];
		req->num_trbs++;
		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
				false, 1, req->request.stream_id,
				req->request.short_not_ok,
				req->request.no_interrupt);
	} else if (req->request.zero && req->request.length &&
		   (IS_ALIGNED(req->request.length, maxp))) {
		struct dwc3 *dwc = dep->dwc;
		struct dwc3_trb *trb;

		req->needs_extra_trb = true;

		/* prepare normal TRB */
		dwc3_prepare_one_trb(dep, req, true, 0);

		/* Now prepare one extra TRB to handle ZLP */
		trb = &dep->trb_pool[dep->trb_enqueue];
		req->num_trbs++;
		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
				false, 1, req->request.stream_id,
				req->request.short_not_ok,
				req->request.no_interrupt);
	} else {
		dwc3_prepare_one_trb(dep, req, false, 0);
	}
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep)
{
	struct dwc3_request *req, *n;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/*
	 * We can get in a situation where there's a request in the started list
	 * but there weren't enough TRBs to fully kick it in the first time
	 * around, so it has been waiting for more TRBs to be freed up.
	 *
	 * In that case, we should check if we have a request with pending_sgs
	 * in the started list and prepare TRBs for that request first,
	 * otherwise we will prepare TRBs completely out of order and that will
	 * break things.
	 */
	list_for_each_entry(req, &dep->started_list, list) {
		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}

	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
		struct dwc3 *dwc = dep->dwc;
		int ret;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
						    dep->direction);
		if (ret)
			return;

		req->sg			= req->request.sg;
		req->start_sg		= req->sg;
		req->num_queued_sgs	= 0;
		req->num_pending_sgs	= req->request.num_mapped_sgs;

		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);
		else
			dwc3_prepare_one_trb_linear(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	int starting;
	int ret;
	u32 cmd;

	if (!dwc3_calc_trbs_left(dep))
		return 0;

	starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);

	dwc3_prepare_trbs(dep);
	req = next_request(&dep->started_list);
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;

		if (dep->stream_capable)
			cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
			cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		if (req->trb)
			memset(req->trb, 0, sizeof(struct dwc3_trb));
		dwc3_gadget_del_and_unmap_request(dep, req, ret);
		return ret;
	}

	return 0;
}

static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

/**
 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
 * @dep: isoc endpoint
 *
 * This function tests for the correct combination of BIT[15:14] from the 16-bit
 * microframe number reported by the XferNotReady event for the future frame
 * number to start the isoc transfer.
 *
 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
 * XferNotReady event are invalid. The driver uses this number to schedule the
 * isochronous transfer and passes it to the START TRANSFER command. Because
 * this number is invalid, the command may fail. If BIT[15:14] matches the
 * internal 16-bit microframe, the START TRANSFER command will pass and the
 * transfer will start at the scheduled time. If it is off by 1, the command
 * will still pass, but the transfer will start 2 seconds in the future. For all
 * other conditions, the START TRANSFER command will fail with bus-expiry.
 *
 * In order to workaround this issue, we can test for the correct combination of
 * BIT[15:14] by sending START TRANSFER commands with different values of
 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframe apart
 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status.
 * As a result, within the 4 possible combinations for BIT[15:14], there will
 * be 2 successful and 2 failing START TRANSFER command statuses. One of the 2
 * successful command statuses will result in a 2-second delayed start. The
 * smaller BIT[15:14] value is the correct combination.
 *
 * Since there are only 4 outcomes and the results are ordered, we can simply
 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
 * deduce the smaller successful combination.
 *
 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
 * of BIT[15:14]. The correct combination is as follows:
 *
 * if test0 fails and test1 passes, BIT[15:14] is 'b01
 * if test0 fails and test1 fails, BIT[15:14] is 'b10
 * if test0 passes and test1 fails, BIT[15:14] is 'b11
 * if test0 passes and test1 passes, BIT[15:14] is 'b00
 *
 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
 * endpoints.
 */
static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
{
	int cmd_status = 0;
	bool test0;
	bool test1;

	while (dep->combo_num < 2) {
		struct dwc3_gadget_ep_cmd_params params;
		u32 test_frame_number;
		u32 cmd;

		/*
		 * Check if we can start isoc transfer on the next interval or
		 * 4 uframes in the future with BIT[15:14] as dep->combo_num
		 */
		test_frame_number = dep->frame_number & 0x3fff;
		test_frame_number |= dep->combo_num << 14;
		test_frame_number += max_t(u32, 4, dep->interval);

		params.param0 = upper_32_bits(dep->dwc->bounce_addr);
		params.param1 = lower_32_bits(dep->dwc->bounce_addr);

		cmd = DWC3_DEPCMD_STARTTRANSFER;
		cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
		cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);

		/* Redo if some other failure beside bus-expiry is received */
		if (cmd_status && cmd_status != -EAGAIN) {
			dep->start_cmd_status = 0;
			dep->combo_num = 0;
			return 0;
		}

		/* Store the first test status */
		if (dep->combo_num == 0)
			dep->start_cmd_status = cmd_status;

		dep->combo_num++;

		/*
		 * End the transfer if the START_TRANSFER command is successful
		 * to wait for the next XferNotReady to test the command again
		 */
		if (cmd_status == 0) {
			dwc3_stop_active_transfer(dep, true, true);
			return 0;
		}
	}

	/* test0 and test1 are both completed at this point */
	test0 = (dep->start_cmd_status == 0);
	test1 = (cmd_status == 0);

	if (!test0 && test1)
		dep->combo_num = 1;
	else if (!test0 && !test1)
		dep->combo_num = 2;
	else if (test0 && !test1)
		dep->combo_num = 3;
	else if (test0 && test1)
		dep->combo_num = 0;

	dep->frame_number &= 0x3fff;
	dep->frame_number |= dep->combo_num << 14;
	dep->frame_number += max_t(u32, 4, dep->interval);

	/* Reinitialize test variables */
	dep->start_cmd_status = 0;
	dep->combo_num = 0;

	return __dwc3_gadget_kick_transfer(dep);
}

static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	int ret;
	int i;

	if (list_empty(&dep->pending_list)) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return -EAGAIN;
	}

	if (!dwc->dis_start_transfer_quirk && dwc3_is_usb31(dwc) &&
	    (dwc->revision <= DWC3_USB31_REVISION_160A ||
	     (dwc->revision == DWC3_USB31_REVISION_170A &&
	      dwc->version_type >= DWC31_VERSIONTYPE_EA01 &&
	      dwc->version_type <= DWC31_VERSIONTYPE_EA06))) {

		if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction)
			return dwc3_gadget_start_isoc_quirk(dep);
	}

	for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
		dep->frame_number = DWC3_ALIGN_FRAME(dep, i + 1);

		ret = __dwc3_gadget_kick_transfer(dep);
		if (ret != -EAGAIN)
			break;
	}

	return ret;
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;

	if (!dep->endpoint.desc) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
				&req->request, req->dep->name))
		return -EINVAL;

	if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
				"%s: request %pK already in flight\n",
				dep->name, &req->request))
		return -EINVAL;

	pm_runtime_get(dwc->dev);

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;

	trace_dwc3_ep_queue(req);

	list_add_tail(&req->list, &dep->pending_list);
	req->status = DWC3_REQUEST_STATUS_QUEUED;

	/* Start the transfer only after the END_TRANSFER is completed */
	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
		dep->flags |= DWC3_EP_DELAY_START;
		return 0;
	}

	/*
	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
	 * wait for an XferNotReady event so we know the current
	 * (micro-)frame number.
	 *
	 * Without this trick, we are very, very likely gonna get Bus Expiry
	 * errors which will force us to issue an EndTransfer command.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
				!(dep->flags & DWC3_EP_TRANSFER_STARTED))
			return 0;

		if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
			if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
				return __dwc3_gadget_start_isoc(dep);
			}
		}
	}

	return __dwc3_gadget_kick_transfer(dep);
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
{
	int i;

	/*
	 * If request was already started, this means we had to
	 * stop the transfer. With that we also need to ignore
	 * all TRBs used by the request, however TRBs can only
	 * be modified after completion of END_TRANSFER
	 * command.
	 * So what we do here is that we wait for
	 * END_TRANSFER completion and only after that, we jump
	 * over TRBs by clearing HWO and incrementing dequeue
	 * pointer.
	 */
	for (i = 0; i < req->num_trbs; i++) {
		struct dwc3_trb *trb;

		trb = req->trb + i;
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		dwc3_ep_inc_deq(dep);
	}

	req->num_trbs = 0;
}

static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
{
	struct dwc3_request *req;
	struct dwc3_request *tmp;

	list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
		dwc3_gadget_ep_skip_trbs(dep, req);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->pending_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->started_list, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dep, true, true);

			if (!r->trb)
				goto out0;

			dwc3_gadget_move_cancelled_request(req);
			if (dep->flags & DWC3_EP_TRANSFER_STARTED)
				goto out0;
			else
				goto out1;
		}
		dev_err(dwc->dev, "request %pK was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		struct dwc3_trb *trb;

		unsigned transfer_in_flight;
		unsigned started;

		if (dep->number > 1)
			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		else
			trb = &dwc->ep0_trb[dep->trb_enqueue];

		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
		started = !list_empty(&dep->started_list);

		if (!protocol && ((dep->direction && transfer_in_flight) ||
				(!dep->direction && started))) {
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
				&params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		if (ret)
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;

	if (dep->number == 0 || dep->number == 1)
		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
	else
		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	return __dwc3_gadget_get_frame(dwc);
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
	int retries;

	int ret;
	u32 reg;

	u8 link_state;
	u8 speed;

	/*
	 * According to the Databook, a remote wakeup request should
	 * be issued only when the device is in the early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if ((speed == DWC3_DSTS_SUPERSPEED) ||
	    (speed == DWC3_DSTS_SUPERSPEED_PLUS))
		return 0;

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		return -EINVAL;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		return ret;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	retries = 20000;

	while (retries--) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		return -EINVAL;
	}

	return 0;
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_wakeup(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32 reg;
	u32 timeout = 500;

	if (pm_runtime_suspended(dwc->dev))
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_gadget_dctl_write_safe(dwc, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		reg &= DWC3_DSTS_DEVCTRLHLT;
	} while (--timeout && !(!is_on ^ !reg));

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;

	is_on = !!is_on;

	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
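	 * This is done below by waiting, with a timeout, on the ep0_in_setup
	 * completion, which the ep0 handling code signals once the SETUP phase
	 * has been reached.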
	 */
	if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
		reinit_completion(&dwc->ep0_in_setup);

		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		if (ret == 0) {
			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
			return -ETIMEDOUT;
		}
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32 reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	if (dwc->revision < DWC3_REVISION_250A)
		reg |= DWC3_DEVTEN_ULSTCNGEN;

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

/**
 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
 * @dwc: pointer to our context structure
 *
 * The following looks complex but it's actually very simple. In order to
 * calculate the number of packets we can burst at once on OUT transfers, we're
 * gonna use RxFIFO size.
 *
 * To calculate RxFIFO size we need two numbers:
 * MDWIDTH = size, in bits, of the internal memory bus
 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
 *
 * Given these two numbers, the formula is simple:
 *
 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
 *
 * 24 bytes is for 3x SETUP packets
 * 16 bytes is a clock domain crossing tolerance
 *
 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
 */
static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
{
	u32 ram2_depth;
	u32 mdwidth;
	u32 nump;
	u32 reg;

	ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);

	nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
	nump = min_t(u32, nump, 16);

	/* update NumP */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~DWC3_DCFG_NUMP_MASK;
	reg |= nump << DWC3_DCFG_NUMP_SHIFT;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static int __dwc3_gadget_start(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret = 0;
	u32 reg;

	/*
	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
	 * the core supports IMOD, disable it.
	 */
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
	} else if (dwc3_has_imod(dwc)) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
	}

	/*
	 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
	 * field instead of letting dwc3 itself calculate that automatically.
1973 * 1974 * This way, we maximize the chances that we'll be able to get several 1975 * bursts of data without going through any sort of endpoint throttling. 1976 */ 1977 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1978 if (dwc3_is_usb31(dwc)) 1979 reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL; 1980 else 1981 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL; 1982 1983 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1984 1985 dwc3_gadget_setup_nump(dwc); 1986 1987 /* Start with SuperSpeed Default */ 1988 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1989 1990 dep = dwc->eps[0]; 1991 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 1992 if (ret) { 1993 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1994 goto err0; 1995 } 1996 1997 dep = dwc->eps[1]; 1998 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 1999 if (ret) { 2000 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2001 goto err1; 2002 } 2003 2004 /* begin to receive SETUP packets */ 2005 dwc->ep0state = EP0_SETUP_PHASE; 2006 dwc->link_state = DWC3_LINK_STATE_SS_DIS; 2007 dwc3_ep0_out_start(dwc); 2008 2009 dwc3_gadget_enable_irq(dwc); 2010 2011 return 0; 2012 2013 err1: 2014 __dwc3_gadget_ep_disable(dwc->eps[0]); 2015 2016 err0: 2017 return ret; 2018 } 2019 2020 static int dwc3_gadget_start(struct usb_gadget *g, 2021 struct usb_gadget_driver *driver) 2022 { 2023 struct dwc3 *dwc = gadget_to_dwc(g); 2024 unsigned long flags; 2025 int ret = 0; 2026 int irq; 2027 2028 irq = dwc->irq_gadget; 2029 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 2030 IRQF_SHARED, "dwc3", dwc->ev_buf); 2031 if (ret) { 2032 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 2033 irq, ret); 2034 goto err0; 2035 } 2036 2037 spin_lock_irqsave(&dwc->lock, flags); 2038 if (dwc->gadget_driver) { 2039 dev_err(dwc->dev, "%s is already bound to %s\n", 2040 dwc->gadget.name, 2041 dwc->gadget_driver->driver.name); 2042 ret = -EBUSY; 2043 goto err1; 2044 } 2045 2046 dwc->gadget_driver = driver; 2047 2048 if (pm_runtime_active(dwc->dev)) 2049 __dwc3_gadget_start(dwc); 2050 2051 spin_unlock_irqrestore(&dwc->lock, flags); 2052 2053 return 0; 2054 2055 err1: 2056 spin_unlock_irqrestore(&dwc->lock, flags); 2057 free_irq(irq, dwc); 2058 2059 err0: 2060 return ret; 2061 } 2062 2063 static void __dwc3_gadget_stop(struct dwc3 *dwc) 2064 { 2065 dwc3_gadget_disable_irq(dwc); 2066 __dwc3_gadget_ep_disable(dwc->eps[0]); 2067 __dwc3_gadget_ep_disable(dwc->eps[1]); 2068 } 2069 2070 static int dwc3_gadget_stop(struct usb_gadget *g) 2071 { 2072 struct dwc3 *dwc = gadget_to_dwc(g); 2073 unsigned long flags; 2074 2075 spin_lock_irqsave(&dwc->lock, flags); 2076 2077 if (pm_runtime_suspended(dwc->dev)) 2078 goto out; 2079 2080 __dwc3_gadget_stop(dwc); 2081 2082 out: 2083 dwc->gadget_driver = NULL; 2084 spin_unlock_irqrestore(&dwc->lock, flags); 2085 2086 free_irq(dwc->irq_gadget, dwc->ev_buf); 2087 2088 return 0; 2089 } 2090 2091 static void dwc3_gadget_config_params(struct usb_gadget *g, 2092 struct usb_dcd_config_params *params) 2093 { 2094 struct dwc3 *dwc = gadget_to_dwc(g); 2095 2096 params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED; 2097 params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED; 2098 2099 /* Recommended BESL */ 2100 if (!dwc->dis_enblslpm_quirk) { 2101 /* 2102 * If the recommended BESL baseline is 0 or if the BESL deep is 2103 * less than 2, Microsoft's Windows 10 host usb stack will issue 2104 * a usb reset immediately after it receives the extended BOS 2105 * descriptor and the enumeration will fail. 
To maintain 2106 * compatibility with the Windows' usb stack, let's set the 2107 * recommended BESL baseline to 1 and clamp the BESL deep to be 2108 * within 2 to 15. 2109 */ 2110 params->besl_baseline = 1; 2111 if (dwc->is_utmi_l1_suspend) 2112 params->besl_deep = 2113 clamp_t(u8, dwc->hird_threshold, 2, 15); 2114 } 2115 2116 /* U1 Device exit Latency */ 2117 if (dwc->dis_u1_entry_quirk) 2118 params->bU1devExitLat = 0; 2119 else 2120 params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT; 2121 2122 /* U2 Device exit Latency */ 2123 if (dwc->dis_u2_entry_quirk) 2124 params->bU2DevExitLat = 0; 2125 else 2126 params->bU2DevExitLat = 2127 cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT); 2128 } 2129 2130 static void dwc3_gadget_set_speed(struct usb_gadget *g, 2131 enum usb_device_speed speed) 2132 { 2133 struct dwc3 *dwc = gadget_to_dwc(g); 2134 unsigned long flags; 2135 u32 reg; 2136 2137 spin_lock_irqsave(&dwc->lock, flags); 2138 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2139 reg &= ~(DWC3_DCFG_SPEED_MASK); 2140 2141 /* 2142 * WORKAROUND: DWC3 revision < 2.20a have an issue 2143 * which would cause metastability state on Run/Stop 2144 * bit if we try to force the IP to USB2-only mode. 2145 * 2146 * Because of that, we cannot configure the IP to any 2147 * speed other than the SuperSpeed 2148 * 2149 * Refers to: 2150 * 2151 * STAR#9000525659: Clock Domain Crossing on DCTL in 2152 * USB 2.0 Mode 2153 */ 2154 if (dwc->revision < DWC3_REVISION_220A && 2155 !dwc->dis_metastability_quirk) { 2156 reg |= DWC3_DCFG_SUPERSPEED; 2157 } else { 2158 switch (speed) { 2159 case USB_SPEED_LOW: 2160 reg |= DWC3_DCFG_LOWSPEED; 2161 break; 2162 case USB_SPEED_FULL: 2163 reg |= DWC3_DCFG_FULLSPEED; 2164 break; 2165 case USB_SPEED_HIGH: 2166 reg |= DWC3_DCFG_HIGHSPEED; 2167 break; 2168 case USB_SPEED_SUPER: 2169 reg |= DWC3_DCFG_SUPERSPEED; 2170 break; 2171 case USB_SPEED_SUPER_PLUS: 2172 if (dwc3_is_usb31(dwc)) 2173 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2174 else 2175 reg |= DWC3_DCFG_SUPERSPEED; 2176 break; 2177 default: 2178 dev_err(dwc->dev, "invalid speed (%d)\n", speed); 2179 2180 if (dwc->revision & DWC3_REVISION_IS_DWC31) 2181 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2182 else 2183 reg |= DWC3_DCFG_SUPERSPEED; 2184 } 2185 } 2186 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2187 2188 spin_unlock_irqrestore(&dwc->lock, flags); 2189 } 2190 2191 static const struct usb_gadget_ops dwc3_gadget_ops = { 2192 .get_frame = dwc3_gadget_get_frame, 2193 .wakeup = dwc3_gadget_wakeup, 2194 .set_selfpowered = dwc3_gadget_set_selfpowered, 2195 .pullup = dwc3_gadget_pullup, 2196 .udc_start = dwc3_gadget_start, 2197 .udc_stop = dwc3_gadget_stop, 2198 .udc_set_speed = dwc3_gadget_set_speed, 2199 .get_config_params = dwc3_gadget_config_params, 2200 }; 2201 2202 /* -------------------------------------------------------------------------- */ 2203 2204 static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep) 2205 { 2206 struct dwc3 *dwc = dep->dwc; 2207 2208 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 2209 dep->endpoint.maxburst = 1; 2210 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 2211 if (!dep->direction) 2212 dwc->gadget.ep0 = &dep->endpoint; 2213 2214 dep->endpoint.caps.type_control = true; 2215 2216 return 0; 2217 } 2218 2219 static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) 2220 { 2221 struct dwc3 *dwc = dep->dwc; 2222 int mdwidth; 2223 int kbytes; 2224 int size; 2225 2226 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 2227 /* MDWIDTH is represented in bits, we need it in bytes */ 2228 mdwidth /= 8; 2229 2230 size = 
		dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
	if (dwc3_is_usb31(dwc))
		size = DWC31_GTXFIFOSIZ_TXFDEF(size);
	else
		size = DWC3_GTXFIFOSIZ_TXFDEF(size);

	/* FIFO depth is in MDWIDTH bytes. Multiply */
	size *= mdwidth;

	kbytes = size / 1024;
	if (kbytes == 0)
		kbytes = 1;

	/*
	 * FIFO sizes account for an extra MDWIDTH * (kbytes + 1) bytes of
	 * internal overhead. We don't really know how these are used,
	 * but the documentation says they exist.
	 */
	size -= mdwidth * (kbytes + 1);
	size /= kbytes;

	usb_ep_set_maxpacket_limit(&dep->endpoint, size);

	dep->endpoint.max_streams = 15;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget.ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
	dep->endpoint.max_streams = 15;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget.ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
{
	struct dwc3_ep *dep;
	bool direction = epnum & 1;
	int ret;
	u8 num = epnum >> 1;

	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
	if (!dep)
		return -ENOMEM;

	dep->dwc = dwc;
	dep->number = epnum;
	dep->direction = direction;
	dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
	dwc->eps[epnum] = dep;
	dep->combo_num = 0;
	dep->start_cmd_status = 0;

	snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
			direction ? "in" : "out");

	dep->endpoint.name = dep->name;

	if (!(dep->number > 1)) {
		dep->endpoint.desc = &dwc3_gadget_ep0_desc;
		dep->endpoint.comp_desc = NULL;
	}

	if (num == 0)
		ret = dwc3_gadget_init_control_endpoint(dep);
	else if (direction)
		ret = dwc3_gadget_init_in_endpoint(dep);
	else
		ret = dwc3_gadget_init_out_endpoint(dep);

	if (ret)
		return ret;

	dep->endpoint.caps.dir_in = direction;
	dep->endpoint.caps.dir_out = !direction;

	INIT_LIST_HEAD(&dep->pending_list);
	INIT_LIST_HEAD(&dep->started_list);
	INIT_LIST_HEAD(&dep->cancelled_list);

	return 0;
}

static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
	u8 epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < total; epnum++) {
		int ret;

		ret = dwc3_gadget_init_endpoint(dwc, epnum);
		if (ret)
			return ret;
	}

	return 0;
}

static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them to the endpoints list.
		 * Due to that, we shouldn't do these two operations here;
		 * otherwise we would end up with all sorts of bugs when
		 * removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}

		kfree(dep);
	}
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status, int chain)
{
	unsigned int count;

	dwc3_ep_inc_deq(dep);

	trace_dwc3_complete_trb(dep, trb);
	req->num_trbs--;

	/*
	 * If we're in the middle of a series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * one where the CHN bit is zero). DWC3 will also avoid clearing
	 * the HWO bit, and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;

	/*
	 * For isochronous transfers, the first TRB in a service interval must
	 * have the Isoc-First type. Track and report its interval frame number.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
	    (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
		unsigned int frame_number;

		frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
		frame_number &= ~(dep->interval - 1);
		req->request.frame_number = frame_number;
	}

	/*
	 * If we're dealing with an unaligned-size OUT transfer, we will be
	 * left with one TRB pending in the ring. We need to manually clear
	 * the HWO bit from that TRB.
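	 *
	 * (The extra TRB appended for such a request never has the CHN bit
	 * set, which is what the check just below uses to spot it before
	 * clearing its HWO bit.)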
2416 */ 2417 2418 if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) { 2419 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2420 return 1; 2421 } 2422 2423 count = trb->size & DWC3_TRB_SIZE_MASK; 2424 req->remaining += count; 2425 2426 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 2427 return 1; 2428 2429 if (event->status & DEPEVT_STATUS_SHORT && !chain) 2430 return 1; 2431 2432 if (event->status & DEPEVT_STATUS_IOC) 2433 return 1; 2434 2435 return 0; 2436 } 2437 2438 static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep, 2439 struct dwc3_request *req, const struct dwc3_event_depevt *event, 2440 int status) 2441 { 2442 struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue]; 2443 struct scatterlist *sg = req->sg; 2444 struct scatterlist *s; 2445 unsigned int pending = req->num_pending_sgs; 2446 unsigned int i; 2447 int ret = 0; 2448 2449 for_each_sg(sg, s, pending, i) { 2450 trb = &dep->trb_pool[dep->trb_dequeue]; 2451 2452 if (trb->ctrl & DWC3_TRB_CTRL_HWO) 2453 break; 2454 2455 req->sg = sg_next(s); 2456 req->num_pending_sgs--; 2457 2458 ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req, 2459 trb, event, status, true); 2460 if (ret) 2461 break; 2462 } 2463 2464 return ret; 2465 } 2466 2467 static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, 2468 struct dwc3_request *req, const struct dwc3_event_depevt *event, 2469 int status) 2470 { 2471 struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue]; 2472 2473 return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb, 2474 event, status, false); 2475 } 2476 2477 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) 2478 { 2479 /* 2480 * For OUT direction, host may send less than the setup 2481 * length. Return true for all OUT requests. 2482 */ 2483 if (!req->direction) 2484 return true; 2485 2486 return req->request.actual == req->request.length; 2487 } 2488 2489 static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, 2490 const struct dwc3_event_depevt *event, 2491 struct dwc3_request *req, int status) 2492 { 2493 int ret; 2494 2495 if (req->num_pending_sgs) 2496 ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event, 2497 status); 2498 else 2499 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, 2500 status); 2501 2502 if (req->needs_extra_trb) { 2503 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, 2504 status); 2505 req->needs_extra_trb = false; 2506 } 2507 2508 req->request.actual = req->request.length - req->remaining; 2509 2510 if (!dwc3_gadget_ep_request_completed(req) || 2511 req->num_pending_sgs) { 2512 __dwc3_gadget_kick_transfer(dep); 2513 goto out; 2514 } 2515 2516 dwc3_gadget_giveback(dep, req, status); 2517 2518 out: 2519 return ret; 2520 } 2521 2522 static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep, 2523 const struct dwc3_event_depevt *event, int status) 2524 { 2525 struct dwc3_request *req; 2526 struct dwc3_request *tmp; 2527 2528 list_for_each_entry_safe(req, tmp, &dep->started_list, list) { 2529 int ret; 2530 2531 ret = dwc3_gadget_ep_cleanup_completed_request(dep, event, 2532 req, status); 2533 if (ret) 2534 break; 2535 } 2536 } 2537 2538 static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep, 2539 const struct dwc3_event_depevt *event) 2540 { 2541 dep->frame_number = event->parameters; 2542 } 2543 2544 static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, 2545 const struct dwc3_event_depevt *event) 2546 { 2547 struct dwc3 *dwc = dep->dwc; 2548 unsigned status = 0; 2549 bool stop = 
false; 2550 2551 dwc3_gadget_endpoint_frame_from_event(dep, event); 2552 2553 if (event->status & DEPEVT_STATUS_BUSERR) 2554 status = -ECONNRESET; 2555 2556 if (event->status & DEPEVT_STATUS_MISSED_ISOC) { 2557 status = -EXDEV; 2558 2559 if (list_empty(&dep->started_list)) 2560 stop = true; 2561 } 2562 2563 dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); 2564 2565 if (stop) { 2566 dwc3_stop_active_transfer(dep, true, true); 2567 dep->flags = DWC3_EP_ENABLED; 2568 } 2569 2570 /* 2571 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 2572 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 2573 */ 2574 if (dwc->revision < DWC3_REVISION_183A) { 2575 u32 reg; 2576 int i; 2577 2578 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 2579 dep = dwc->eps[i]; 2580 2581 if (!(dep->flags & DWC3_EP_ENABLED)) 2582 continue; 2583 2584 if (!list_empty(&dep->started_list)) 2585 return; 2586 } 2587 2588 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2589 reg |= dwc->u1u2; 2590 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2591 2592 dwc->u1u2 = 0; 2593 } 2594 } 2595 2596 static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep, 2597 const struct dwc3_event_depevt *event) 2598 { 2599 dwc3_gadget_endpoint_frame_from_event(dep, event); 2600 (void) __dwc3_gadget_start_isoc(dep); 2601 } 2602 2603 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 2604 const struct dwc3_event_depevt *event) 2605 { 2606 struct dwc3_ep *dep; 2607 u8 epnum = event->endpoint_number; 2608 u8 cmd; 2609 2610 dep = dwc->eps[epnum]; 2611 2612 if (!(dep->flags & DWC3_EP_ENABLED)) { 2613 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) 2614 return; 2615 2616 /* Handle only EPCMDCMPLT when EP disabled */ 2617 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) 2618 return; 2619 } 2620 2621 if (epnum == 0 || epnum == 1) { 2622 dwc3_ep0_interrupt(dwc, event); 2623 return; 2624 } 2625 2626 switch (event->endpoint_event) { 2627 case DWC3_DEPEVT_XFERINPROGRESS: 2628 dwc3_gadget_endpoint_transfer_in_progress(dep, event); 2629 break; 2630 case DWC3_DEPEVT_XFERNOTREADY: 2631 dwc3_gadget_endpoint_transfer_not_ready(dep, event); 2632 break; 2633 case DWC3_DEPEVT_EPCMDCMPLT: 2634 cmd = DEPEVT_PARAMETER_CMD(event->parameters); 2635 2636 if (cmd == DWC3_DEPCMD_ENDTRANSFER) { 2637 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; 2638 dep->flags &= ~DWC3_EP_TRANSFER_STARTED; 2639 dwc3_gadget_ep_cleanup_cancelled_requests(dep); 2640 if ((dep->flags & DWC3_EP_DELAY_START) && 2641 !usb_endpoint_xfer_isoc(dep->endpoint.desc)) 2642 __dwc3_gadget_kick_transfer(dep); 2643 2644 dep->flags &= ~DWC3_EP_DELAY_START; 2645 } 2646 break; 2647 case DWC3_DEPEVT_STREAMEVT: 2648 case DWC3_DEPEVT_XFERCOMPLETE: 2649 case DWC3_DEPEVT_RXTXFIFOEVT: 2650 break; 2651 } 2652 } 2653 2654 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2655 { 2656 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2657 spin_unlock(&dwc->lock); 2658 dwc->gadget_driver->disconnect(&dwc->gadget); 2659 spin_lock(&dwc->lock); 2660 } 2661 } 2662 2663 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2664 { 2665 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2666 spin_unlock(&dwc->lock); 2667 dwc->gadget_driver->suspend(&dwc->gadget); 2668 spin_lock(&dwc->lock); 2669 } 2670 } 2671 2672 static void dwc3_resume_gadget(struct dwc3 *dwc) 2673 { 2674 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2675 spin_unlock(&dwc->lock); 2676 dwc->gadget_driver->resume(&dwc->gadget); 2677 spin_lock(&dwc->lock); 2678 } 2679 } 2680 2681 static void 
dwc3_reset_gadget(struct dwc3 *dwc) 2682 { 2683 if (!dwc->gadget_driver) 2684 return; 2685 2686 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2687 spin_unlock(&dwc->lock); 2688 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2689 spin_lock(&dwc->lock); 2690 } 2691 } 2692 2693 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, 2694 bool interrupt) 2695 { 2696 struct dwc3_gadget_ep_cmd_params params; 2697 u32 cmd; 2698 int ret; 2699 2700 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) || 2701 (dep->flags & DWC3_EP_END_TRANSFER_PENDING)) 2702 return; 2703 2704 /* 2705 * NOTICE: We are violating what the Databook says about the 2706 * EndTransfer command. Ideally we would _always_ wait for the 2707 * EndTransfer Command Completion IRQ, but that's causing too 2708 * much trouble synchronizing between us and gadget driver. 2709 * 2710 * We have discussed this with the IP Provider and it was 2711 * suggested to giveback all requests here. 2712 * 2713 * Note also that a similar handling was tested by Synopsys 2714 * (thanks a lot Paul) and nothing bad has come out of it. 2715 * In short, what we're doing is issuing EndTransfer with 2716 * CMDIOC bit set and delay kicking transfer until the 2717 * EndTransfer command had completed. 2718 * 2719 * As of IP version 3.10a of the DWC_usb3 IP, the controller 2720 * supports a mode to work around the above limitation. The 2721 * software can poll the CMDACT bit in the DEPCMD register 2722 * after issuing a EndTransfer command. This mode is enabled 2723 * by writing GUCTL2[14]. This polling is already done in the 2724 * dwc3_send_gadget_ep_cmd() function so if the mode is 2725 * enabled, the EndTransfer command will have completed upon 2726 * returning from this function. 2727 * 2728 * This mode is NOT available on the DWC_usb31 IP. 2729 */ 2730 2731 cmd = DWC3_DEPCMD_ENDTRANSFER; 2732 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0; 2733 cmd |= interrupt ? 
			DWC3_DEPCMD_CMDIOC : 0;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;

	if (!interrupt)
		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	else
		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;

	dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);

	dwc->connected = false;
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dwc->connected = true;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify the gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_gadget_dctl_write_safe(dwc, reg);
	dwc->test_mode = false;
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 *
	 * Currently we always use the reset value.
	 * If any platform wants to set this to a different value, we need
	 * to add a setting and update GCTL.RAMCLKSEL here.
	 */

	switch (speed) {
	case DWC3_DSTS_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
		break;
	case DWC3_DSTS_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DSTS_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DSTS_FULLSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DSTS_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A) &&
	    (speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
					    (dwc->is_utmi_l1_suspend << 4));

		/*
		 * On dwc3 revisions >= 2.40a, with the LPM Erratum enabled
		 * and DCFG.LPMCap set, the core responds with an ACK when
		 * the BESL value in the LPM token is less than or equal to
		 * the LPM NYET threshold.
		 */
		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
			  && dwc->has_lpm_erratum,
			  "LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
			reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);

		dwc3_gadget_dctl_write_safe(dwc, reg);
	} else {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_gadget_dctl_write_safe(dwc, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 revisions < 2.50a, when configured without
	 * Hibernation mode enabled, have an issue which shows up when the
	 * device detects a host-initiated U3 exit.
	 *
	 * In that case, the device will generate a Link State Change
	 * Interrupt from U3 to RESUME which is only necessary if Hibernation
	 * is configured in.
	 *
	 * There are no functional changes due to such a spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
	    (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
		    (next == DWC3_LINK_STATE_RESUME)) {
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 revisions < 1.83a have an issue where, depending
	 * on the link partner, the USB session might do multiple entries and
	 * exits of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
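	 * (The second half lives in dwc3_gadget_endpoint_transfer_in_progress(),
	 * which writes the saved U1/U2 enable bits back to DCTL once no
	 * started transfers remain on any enabled endpoint.)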
3021 * 3022 * Refers to: 3023 * 3024 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us 3025 * core send LGO_Ux entering U0 3026 */ 3027 if (dwc->revision < DWC3_REVISION_183A) { 3028 if (next == DWC3_LINK_STATE_U0) { 3029 u32 u1u2; 3030 u32 reg; 3031 3032 switch (dwc->link_state) { 3033 case DWC3_LINK_STATE_U1: 3034 case DWC3_LINK_STATE_U2: 3035 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 3036 u1u2 = reg & (DWC3_DCTL_INITU2ENA 3037 | DWC3_DCTL_ACCEPTU2ENA 3038 | DWC3_DCTL_INITU1ENA 3039 | DWC3_DCTL_ACCEPTU1ENA); 3040 3041 if (!dwc->u1u2) 3042 dwc->u1u2 = reg & u1u2; 3043 3044 reg &= ~u1u2; 3045 3046 dwc3_gadget_dctl_write_safe(dwc, reg); 3047 break; 3048 default: 3049 /* do nothing */ 3050 break; 3051 } 3052 } 3053 } 3054 3055 switch (next) { 3056 case DWC3_LINK_STATE_U1: 3057 if (dwc->speed == USB_SPEED_SUPER) 3058 dwc3_suspend_gadget(dwc); 3059 break; 3060 case DWC3_LINK_STATE_U2: 3061 case DWC3_LINK_STATE_U3: 3062 dwc3_suspend_gadget(dwc); 3063 break; 3064 case DWC3_LINK_STATE_RESUME: 3065 dwc3_resume_gadget(dwc); 3066 break; 3067 default: 3068 /* do nothing */ 3069 break; 3070 } 3071 3072 dwc->link_state = next; 3073 } 3074 3075 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, 3076 unsigned int evtinfo) 3077 { 3078 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 3079 3080 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) 3081 dwc3_suspend_gadget(dwc); 3082 3083 dwc->link_state = next; 3084 } 3085 3086 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, 3087 unsigned int evtinfo) 3088 { 3089 unsigned int is_ss = evtinfo & BIT(4); 3090 3091 /* 3092 * WORKAROUND: DWC3 revison 2.20a with hibernation support 3093 * have a known issue which can cause USB CV TD.9.23 to fail 3094 * randomly. 3095 * 3096 * Because of this issue, core could generate bogus hibernation 3097 * events which SW needs to ignore. 3098 * 3099 * Refers to: 3100 * 3101 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 3102 * Device Fallback from SuperSpeed 3103 */ 3104 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) 3105 return; 3106 3107 /* enter hibernation here */ 3108 } 3109 3110 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 3111 const struct dwc3_event_devt *event) 3112 { 3113 switch (event->type) { 3114 case DWC3_DEVICE_EVENT_DISCONNECT: 3115 dwc3_gadget_disconnect_interrupt(dwc); 3116 break; 3117 case DWC3_DEVICE_EVENT_RESET: 3118 dwc3_gadget_reset_interrupt(dwc); 3119 break; 3120 case DWC3_DEVICE_EVENT_CONNECT_DONE: 3121 dwc3_gadget_conndone_interrupt(dwc); 3122 break; 3123 case DWC3_DEVICE_EVENT_WAKEUP: 3124 dwc3_gadget_wakeup_interrupt(dwc); 3125 break; 3126 case DWC3_DEVICE_EVENT_HIBER_REQ: 3127 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, 3128 "unexpected hibernation event\n")) 3129 break; 3130 3131 dwc3_gadget_hibernation_interrupt(dwc, event->event_info); 3132 break; 3133 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 3134 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 3135 break; 3136 case DWC3_DEVICE_EVENT_EOPF: 3137 /* It changed to be suspend event for version 2.30a and above */ 3138 if (dwc->revision >= DWC3_REVISION_230A) { 3139 /* 3140 * Ignore suspend event until the gadget enters into 3141 * USB_STATE_CONFIGURED state. 
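			 * (Otherwise the function driver would get a
			 * ->suspend() callback before it has even been
			 * configured.)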
3142 */ 3143 if (dwc->gadget.state >= USB_STATE_CONFIGURED) 3144 dwc3_gadget_suspend_interrupt(dwc, 3145 event->event_info); 3146 } 3147 break; 3148 case DWC3_DEVICE_EVENT_SOF: 3149 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 3150 case DWC3_DEVICE_EVENT_CMD_CMPL: 3151 case DWC3_DEVICE_EVENT_OVERFLOW: 3152 break; 3153 default: 3154 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 3155 } 3156 } 3157 3158 static void dwc3_process_event_entry(struct dwc3 *dwc, 3159 const union dwc3_event *event) 3160 { 3161 trace_dwc3_event(event->raw, dwc); 3162 3163 if (!event->type.is_devspec) 3164 dwc3_endpoint_interrupt(dwc, &event->depevt); 3165 else if (event->type.type == DWC3_EVENT_TYPE_DEV) 3166 dwc3_gadget_interrupt(dwc, &event->devt); 3167 else 3168 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 3169 } 3170 3171 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) 3172 { 3173 struct dwc3 *dwc = evt->dwc; 3174 irqreturn_t ret = IRQ_NONE; 3175 int left; 3176 u32 reg; 3177 3178 left = evt->count; 3179 3180 if (!(evt->flags & DWC3_EVENT_PENDING)) 3181 return IRQ_NONE; 3182 3183 while (left > 0) { 3184 union dwc3_event event; 3185 3186 event.raw = *(u32 *) (evt->cache + evt->lpos); 3187 3188 dwc3_process_event_entry(dwc, &event); 3189 3190 /* 3191 * FIXME we wrap around correctly to the next entry as 3192 * almost all entries are 4 bytes in size. There is one 3193 * entry which has 12 bytes which is a regular entry 3194 * followed by 8 bytes data. ATM I don't know how 3195 * things are organized if we get next to the a 3196 * boundary so I worry about that once we try to handle 3197 * that. 3198 */ 3199 evt->lpos = (evt->lpos + 4) % evt->length; 3200 left -= 4; 3201 } 3202 3203 evt->count = 0; 3204 evt->flags &= ~DWC3_EVENT_PENDING; 3205 ret = IRQ_HANDLED; 3206 3207 /* Unmask interrupt */ 3208 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 3209 reg &= ~DWC3_GEVNTSIZ_INTMASK; 3210 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 3211 3212 if (dwc->imod_interval) { 3213 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 3214 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 3215 } 3216 3217 return ret; 3218 } 3219 3220 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) 3221 { 3222 struct dwc3_event_buffer *evt = _evt; 3223 struct dwc3 *dwc = evt->dwc; 3224 unsigned long flags; 3225 irqreturn_t ret = IRQ_NONE; 3226 3227 spin_lock_irqsave(&dwc->lock, flags); 3228 ret = dwc3_process_event_buf(evt); 3229 spin_unlock_irqrestore(&dwc->lock, flags); 3230 3231 return ret; 3232 } 3233 3234 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) 3235 { 3236 struct dwc3 *dwc = evt->dwc; 3237 u32 amount; 3238 u32 count; 3239 u32 reg; 3240 3241 if (pm_runtime_suspended(dwc->dev)) { 3242 pm_runtime_get(dwc->dev); 3243 disable_irq_nosync(dwc->irq_gadget); 3244 dwc->pending_events = true; 3245 return IRQ_HANDLED; 3246 } 3247 3248 /* 3249 * With PCIe legacy interrupt, test shows that top-half irq handler can 3250 * be called again after HW interrupt deassertion. Check if bottom-half 3251 * irq event handler completes before caching new event to prevent 3252 * losing events. 
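	 *
	 * (If DWC3_EVENT_PENDING is still set, the threaded handler has not
	 * finished with the previously cached events, so report IRQ_HANDLED
	 * without reading GEVNTCOUNT again.)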
3253 */ 3254 if (evt->flags & DWC3_EVENT_PENDING) 3255 return IRQ_HANDLED; 3256 3257 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 3258 count &= DWC3_GEVNTCOUNT_MASK; 3259 if (!count) 3260 return IRQ_NONE; 3261 3262 evt->count = count; 3263 evt->flags |= DWC3_EVENT_PENDING; 3264 3265 /* Mask interrupt */ 3266 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 3267 reg |= DWC3_GEVNTSIZ_INTMASK; 3268 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 3269 3270 amount = min(count, evt->length - evt->lpos); 3271 memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); 3272 3273 if (amount < count) 3274 memcpy(evt->cache, evt->buf, count - amount); 3275 3276 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); 3277 3278 return IRQ_WAKE_THREAD; 3279 } 3280 3281 static irqreturn_t dwc3_interrupt(int irq, void *_evt) 3282 { 3283 struct dwc3_event_buffer *evt = _evt; 3284 3285 return dwc3_check_event_buf(evt); 3286 } 3287 3288 static int dwc3_gadget_get_irq(struct dwc3 *dwc) 3289 { 3290 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); 3291 int irq; 3292 3293 irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral"); 3294 if (irq > 0) 3295 goto out; 3296 3297 if (irq == -EPROBE_DEFER) 3298 goto out; 3299 3300 irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3"); 3301 if (irq > 0) 3302 goto out; 3303 3304 if (irq == -EPROBE_DEFER) 3305 goto out; 3306 3307 irq = platform_get_irq(dwc3_pdev, 0); 3308 if (irq > 0) 3309 goto out; 3310 3311 if (!irq) 3312 irq = -EINVAL; 3313 3314 out: 3315 return irq; 3316 } 3317 3318 /** 3319 * dwc3_gadget_init - initializes gadget related registers 3320 * @dwc: pointer to our controller context structure 3321 * 3322 * Returns 0 on success otherwise negative errno. 3323 */ 3324 int dwc3_gadget_init(struct dwc3 *dwc) 3325 { 3326 int ret; 3327 int irq; 3328 3329 irq = dwc3_gadget_get_irq(dwc); 3330 if (irq < 0) { 3331 ret = irq; 3332 goto err0; 3333 } 3334 3335 dwc->irq_gadget = irq; 3336 3337 dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, 3338 sizeof(*dwc->ep0_trb) * 2, 3339 &dwc->ep0_trb_addr, GFP_KERNEL); 3340 if (!dwc->ep0_trb) { 3341 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 3342 ret = -ENOMEM; 3343 goto err0; 3344 } 3345 3346 dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL); 3347 if (!dwc->setup_buf) { 3348 ret = -ENOMEM; 3349 goto err1; 3350 } 3351 3352 dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, 3353 &dwc->bounce_addr, GFP_KERNEL); 3354 if (!dwc->bounce) { 3355 ret = -ENOMEM; 3356 goto err2; 3357 } 3358 3359 init_completion(&dwc->ep0_in_setup); 3360 3361 dwc->gadget.ops = &dwc3_gadget_ops; 3362 dwc->gadget.speed = USB_SPEED_UNKNOWN; 3363 dwc->gadget.sg_supported = true; 3364 dwc->gadget.name = "dwc3-gadget"; 3365 dwc->gadget.lpm_capable = true; 3366 3367 /* 3368 * FIXME We might be setting max_speed to <SUPER, however versions 3369 * <2.20a of dwc3 have an issue with metastability (documented 3370 * elsewhere in this driver) which tells us we can't set max speed to 3371 * anything lower than SUPER. 3372 * 3373 * Because gadget.max_speed is only used by composite.c and function 3374 * drivers (i.e. it won't go into dwc3's registers) we are allowing this 3375 * to happen so we avoid sending SuperSpeed Capability descriptor 3376 * together with our BOS descriptor as that could confuse host into 3377 * thinking we can handle super speed. 
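	 * (The speed actually programmed into DCFG is still forced to
	 * SuperSpeed on those revisions by dwc3_gadget_set_speed(), for the
	 * same metastability reason.)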
3378 * 3379 * Note that, in fact, we won't even support GetBOS requests when speed 3380 * is less than super speed because we don't have means, yet, to tell 3381 * composite.c that we are USB 2.0 + LPM ECN. 3382 */ 3383 if (dwc->revision < DWC3_REVISION_220A && 3384 !dwc->dis_metastability_quirk) 3385 dev_info(dwc->dev, "changing max_speed on rev %08x\n", 3386 dwc->revision); 3387 3388 dwc->gadget.max_speed = dwc->maximum_speed; 3389 3390 /* 3391 * REVISIT: Here we should clear all pending IRQs to be 3392 * sure we're starting from a well known location. 3393 */ 3394 3395 ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps); 3396 if (ret) 3397 goto err3; 3398 3399 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 3400 if (ret) { 3401 dev_err(dwc->dev, "failed to register udc\n"); 3402 goto err4; 3403 } 3404 3405 dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed); 3406 3407 return 0; 3408 3409 err4: 3410 dwc3_gadget_free_endpoints(dwc); 3411 3412 err3: 3413 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3414 dwc->bounce_addr); 3415 3416 err2: 3417 kfree(dwc->setup_buf); 3418 3419 err1: 3420 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3421 dwc->ep0_trb, dwc->ep0_trb_addr); 3422 3423 err0: 3424 return ret; 3425 } 3426 3427 /* -------------------------------------------------------------------------- */ 3428 3429 void dwc3_gadget_exit(struct dwc3 *dwc) 3430 { 3431 usb_del_gadget_udc(&dwc->gadget); 3432 dwc3_gadget_free_endpoints(dwc); 3433 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3434 dwc->bounce_addr); 3435 kfree(dwc->setup_buf); 3436 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3437 dwc->ep0_trb, dwc->ep0_trb_addr); 3438 } 3439 3440 int dwc3_gadget_suspend(struct dwc3 *dwc) 3441 { 3442 if (!dwc->gadget_driver) 3443 return 0; 3444 3445 dwc3_gadget_run_stop(dwc, false, false); 3446 dwc3_disconnect_gadget(dwc); 3447 __dwc3_gadget_stop(dwc); 3448 3449 return 0; 3450 } 3451 3452 int dwc3_gadget_resume(struct dwc3 *dwc) 3453 { 3454 int ret; 3455 3456 if (!dwc->gadget_driver) 3457 return 0; 3458 3459 ret = __dwc3_gadget_start(dwc); 3460 if (ret < 0) 3461 goto err0; 3462 3463 ret = dwc3_gadget_run_stop(dwc, true, false); 3464 if (ret < 0) 3465 goto err1; 3466 3467 return 0; 3468 3469 err1: 3470 __dwc3_gadget_stop(dwc); 3471 3472 err0: 3473 return ret; 3474 } 3475 3476 void dwc3_gadget_process_pending_events(struct dwc3 *dwc) 3477 { 3478 if (dwc->pending_events) { 3479 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); 3480 dwc->pending_events = false; 3481 enable_irq(dwc->irq_gadget); 3482 } 3483 } 3484