1 /* 2 * core.c - DesignWare HS OTG Controller common routines 3 * 4 * Copyright (C) 2004-2013 Synopsys, Inc. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The names of the above-listed copyright holders may not be used 16 * to endorse or promote products derived from this software without 17 * specific prior written permission. 18 * 19 * ALTERNATIVELY, this software may be distributed under the terms of the 20 * GNU General Public License ("GPL") as published by the Free Software 21 * Foundation; either version 2 of the License, or (at your option) any 22 * later version. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 /* 38 * The Core code provides basic services for accessing and managing the 39 * DWC_otg hardware. These services are used by both the Host Controller 40 * Driver and the Peripheral Controller Driver. 41 */ 42 #include <linux/kernel.h> 43 #include <linux/module.h> 44 #include <linux/moduleparam.h> 45 #include <linux/spinlock.h> 46 #include <linux/interrupt.h> 47 #include <linux/dma-mapping.h> 48 #include <linux/delay.h> 49 #include <linux/io.h> 50 #include <linux/slab.h> 51 #include <linux/usb.h> 52 53 #include <linux/usb/hcd.h> 54 #include <linux/usb/ch11.h> 55 56 #include "core.h" 57 #include "hcd.h" 58 59 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 60 /** 61 * dwc2_backup_host_registers() - Backup controller host registers. 62 * When suspending usb bus, registers needs to be backuped 63 * if controller power is disabled once suspended. 64 * 65 * @hsotg: Programming view of the DWC_otg controller 66 */ 67 static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) 68 { 69 struct dwc2_hregs_backup *hr; 70 int i; 71 72 dev_dbg(hsotg->dev, "%s\n", __func__); 73 74 /* Backup Host regs */ 75 hr = &hsotg->hr_backup; 76 hr->hcfg = readl(hsotg->regs + HCFG); 77 hr->haintmsk = readl(hsotg->regs + HAINTMSK); 78 for (i = 0; i < hsotg->core_params->host_channels; ++i) 79 hr->hcintmsk[i] = readl(hsotg->regs + HCINTMSK(i)); 80 81 hr->hprt0 = readl(hsotg->regs + HPRT0); 82 hr->hfir = readl(hsotg->regs + HFIR); 83 hr->valid = true; 84 85 return 0; 86 } 87 88 /** 89 * dwc2_restore_host_registers() - Restore controller host registers. 
 * When resuming the USB bus, the host registers need to be restored
 * if the controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore host regs */
	hr = &hsotg->hr_backup;
	if (!hr->valid) {
		dev_err(hsotg->dev, "%s: no host registers to restore\n",
			__func__);
		return -EINVAL;
	}
	hr->valid = false;

	writel(hr->hcfg, hsotg->regs + HCFG);
	writel(hr->haintmsk, hsotg->regs + HAINTMSK);

	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));

	writel(hr->hprt0, hsotg->regs + HPRT0);
	writel(hr->hfir, hsotg->regs + HFIR);

	return 0;
}
#else
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the USB bus, the registers need to be backed up
 * if the controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = &hsotg->dr_backup;

	dr->dcfg = readl(hsotg->regs + DCFG);
	dr->dctl = readl(hsotg->regs + DCTL);
	dr->daintmsk = readl(hsotg->regs + DAINTMSK);
	dr->diepmsk = readl(hsotg->regs + DIEPMSK);
	dr->doepmsk = readl(hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = readl(hsotg->regs + DIEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = readl(hsotg->regs + DIEPTSIZ(i));
		dr->diepdma[i] = readl(hsotg->regs + DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = readl(hsotg->regs + DOEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i));
		dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i));
	}
	dr->valid = true;
	return 0;
}

/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the USB bus, the device registers need to be restored
 * if the controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	dr->valid = false;

	writel(dr->dcfg, hsotg->regs + DCFG);
	writel(dr->dctl, hsotg->regs + DCTL);
	writel(dr->daintmsk, hsotg->regs + DAINTMSK);
	writel(dr->diepmsk, hsotg->regs + DIEPMSK);
	writel(dr->doepmsk, hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
		writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
		writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));

		/* Restore OUT EPs */
		writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
		writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
		writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
	}

	/* Set the Power-On Programming done bit */
	dctl = readl(hsotg->regs + DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	writel(dctl, hsotg->regs + DCTL);

	return 0;
}
#else
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif
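
/*
 * Illustrative sketch, not part of the original driver: the intended
 * restore order on resume is the global registers first, followed by the
 * host- or device-specific set for the current mode.  This mirrors what
 * dwc2_exit_hibernation() does further down; the helper name here is
 * hypothetical and the block is compiled out.
 */
#if 0
static int dwc2_example_restore_all(struct dwc2_hsotg *hsotg)
{
	int ret;

	ret = dwc2_restore_global_registers(hsotg);
	if (ret)
		return ret;

	if (dwc2_is_host_mode(hsotg))
		ret = dwc2_restore_host_registers(hsotg);
	else
		ret = dwc2_restore_device_registers(hsotg);

	return ret;
}
#endif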

/**
 * dwc2_backup_global_registers() - Backup global controller registers.
 * When suspending the USB bus, the registers need to be backed up
 * if the controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	/* Backup global regs */
	gr = &hsotg->gr_backup;

	gr->gotgctl = readl(hsotg->regs + GOTGCTL);
	gr->gintmsk = readl(hsotg->regs + GINTMSK);
	gr->gahbcfg = readl(hsotg->regs + GAHBCFG);
	gr->gusbcfg = readl(hsotg->regs + GUSBCFG);
	gr->grxfsiz = readl(hsotg->regs + GRXFSIZ);
	gr->gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
	gr->hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
	gr->gdfifocfg = readl(hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i));

	gr->valid = true;
	return 0;
}

/**
 * dwc2_restore_global_registers() - Restore controller global registers.
 * When resuming the USB bus, the global registers need to be restored
 * if the controller power was disabled.
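 * GINTSTS is cleared before GINTMSK and the remaining global registers
 * are written back, so no stale interrupt status can fire once the mask
 * is restored.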
275 * 276 * @hsotg: Programming view of the DWC_otg controller 277 */ 278 static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg) 279 { 280 struct dwc2_gregs_backup *gr; 281 int i; 282 283 dev_dbg(hsotg->dev, "%s\n", __func__); 284 285 /* Restore global regs */ 286 gr = &hsotg->gr_backup; 287 if (!gr->valid) { 288 dev_err(hsotg->dev, "%s: no global registers to restore\n", 289 __func__); 290 return -EINVAL; 291 } 292 gr->valid = false; 293 294 writel(0xffffffff, hsotg->regs + GINTSTS); 295 writel(gr->gotgctl, hsotg->regs + GOTGCTL); 296 writel(gr->gintmsk, hsotg->regs + GINTMSK); 297 writel(gr->gusbcfg, hsotg->regs + GUSBCFG); 298 writel(gr->gahbcfg, hsotg->regs + GAHBCFG); 299 writel(gr->grxfsiz, hsotg->regs + GRXFSIZ); 300 writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ); 301 writel(gr->hptxfsiz, hsotg->regs + HPTXFSIZ); 302 writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG); 303 for (i = 0; i < MAX_EPS_CHANNELS; i++) 304 writel(gr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i)); 305 306 return 0; 307 } 308 309 /** 310 * dwc2_exit_hibernation() - Exit controller from Partial Power Down. 311 * 312 * @hsotg: Programming view of the DWC_otg controller 313 * @restore: Controller registers need to be restored 314 */ 315 int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore) 316 { 317 u32 pcgcctl; 318 int ret = 0; 319 320 if (!hsotg->core_params->hibernation) 321 return -ENOTSUPP; 322 323 pcgcctl = readl(hsotg->regs + PCGCTL); 324 pcgcctl &= ~PCGCTL_STOPPCLK; 325 writel(pcgcctl, hsotg->regs + PCGCTL); 326 327 pcgcctl = readl(hsotg->regs + PCGCTL); 328 pcgcctl &= ~PCGCTL_PWRCLMP; 329 writel(pcgcctl, hsotg->regs + PCGCTL); 330 331 pcgcctl = readl(hsotg->regs + PCGCTL); 332 pcgcctl &= ~PCGCTL_RSTPDWNMODULE; 333 writel(pcgcctl, hsotg->regs + PCGCTL); 334 335 udelay(100); 336 if (restore) { 337 ret = dwc2_restore_global_registers(hsotg); 338 if (ret) { 339 dev_err(hsotg->dev, "%s: failed to restore registers\n", 340 __func__); 341 return ret; 342 } 343 if (dwc2_is_host_mode(hsotg)) { 344 ret = dwc2_restore_host_registers(hsotg); 345 if (ret) { 346 dev_err(hsotg->dev, "%s: failed to restore host registers\n", 347 __func__); 348 return ret; 349 } 350 } else { 351 ret = dwc2_restore_device_registers(hsotg); 352 if (ret) { 353 dev_err(hsotg->dev, "%s: failed to restore device registers\n", 354 __func__); 355 return ret; 356 } 357 } 358 } 359 360 return ret; 361 } 362 363 /** 364 * dwc2_enter_hibernation() - Put controller in Partial Power Down. 
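 * The global and the mode-specific (host or device) registers are backed
 * up first, then the PCGCTL power clamp, module reset and PHY clock stop
 * bits are set; dwc2_exit_hibernation() reverses this sequence on resume.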
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/* Backup all registers */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}

	if (dwc2_is_host_mode(hsotg)) {
		ret = dwc2_backup_host_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup host registers\n",
				__func__);
			return ret;
		}
	} else {
		ret = dwc2_backup_device_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup device registers\n",
				__func__);
			return ret;
		}
	}

	/* Put the controller in low power state */
	pcgcctl = readl(hsotg->regs + PCGCTL);

	pcgcctl |= PCGCTL_PWRCLMP;
	writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_RSTPDWNMODULE;
	writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_STOPPCLK;
	writel(pcgcctl, hsotg->regs + PCGCTL);

	return ret;
}

/**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
 * used in both device and host modes
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	/* Clear any pending OTG Interrupts */
	writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	writel(0xffffffff, hsotg->regs + GINTSTS);

	/* Enable the interrupts in the GINTMSK */
	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;

	if (hsotg->core_params->dma_enable <= 0)
		intmsk |= GINTSTS_RXFLVL;
	if (hsotg->core_params->external_id_pin_ctl <= 0)
		intmsk |= GINTSTS_CONIDSTSCHNG;

	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
		  GINTSTS_SESSREQINT;

	writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * Initializes the FSLSPClkSel field of the HCFG register depending on the
 * PHY type
 */
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, val;

	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	     hsotg->core_params->ulpi_fs_ls > 0) ||
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* Full speed PHY */
		val = HCFG_FSLSPCLKSEL_48_MHZ;
	} else {
		/* High speed PHY running at full speed or high speed */
		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
	}

	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
	hcfg = readl(hsotg->regs + HCFG);
	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
	writel(hcfg, hsotg->regs + HCFG);
}

/*
 * Do a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 */
static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;
	u32 gusbcfg;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Wait for AHB master IDLE state */
	do {
		usleep_range(20000, 40000);
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG!
AHB Idle GRSTCTL=%0x\n", 492 __func__, greset); 493 return -EBUSY; 494 } 495 } while (!(greset & GRSTCTL_AHBIDLE)); 496 497 /* Core Soft Reset */ 498 count = 0; 499 greset |= GRSTCTL_CSFTRST; 500 writel(greset, hsotg->regs + GRSTCTL); 501 do { 502 usleep_range(20000, 40000); 503 greset = readl(hsotg->regs + GRSTCTL); 504 if (++count > 50) { 505 dev_warn(hsotg->dev, 506 "%s() HANG! Soft Reset GRSTCTL=%0x\n", 507 __func__, greset); 508 return -EBUSY; 509 } 510 } while (greset & GRSTCTL_CSFTRST); 511 512 if (hsotg->dr_mode == USB_DR_MODE_HOST) { 513 gusbcfg = readl(hsotg->regs + GUSBCFG); 514 gusbcfg &= ~GUSBCFG_FORCEDEVMODE; 515 gusbcfg |= GUSBCFG_FORCEHOSTMODE; 516 writel(gusbcfg, hsotg->regs + GUSBCFG); 517 } else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { 518 gusbcfg = readl(hsotg->regs + GUSBCFG); 519 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 520 gusbcfg |= GUSBCFG_FORCEDEVMODE; 521 writel(gusbcfg, hsotg->regs + GUSBCFG); 522 } else if (hsotg->dr_mode == USB_DR_MODE_OTG) { 523 gusbcfg = readl(hsotg->regs + GUSBCFG); 524 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 525 gusbcfg &= ~GUSBCFG_FORCEDEVMODE; 526 writel(gusbcfg, hsotg->regs + GUSBCFG); 527 } 528 529 /* 530 * NOTE: This long sleep is _very_ important, otherwise the core will 531 * not stay in host mode after a connector ID change! 532 */ 533 usleep_range(150000, 200000); 534 535 return 0; 536 } 537 538 static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 539 { 540 u32 usbcfg, i2cctl; 541 int retval = 0; 542 543 /* 544 * core_init() is now called on every switch so only call the 545 * following for the first time through 546 */ 547 if (select_phy) { 548 dev_dbg(hsotg->dev, "FS PHY selected\n"); 549 usbcfg = readl(hsotg->regs + GUSBCFG); 550 usbcfg |= GUSBCFG_PHYSEL; 551 writel(usbcfg, hsotg->regs + GUSBCFG); 552 553 /* Reset after a PHY select */ 554 retval = dwc2_core_reset(hsotg); 555 if (retval) { 556 dev_err(hsotg->dev, "%s() Reset failed, aborting", 557 __func__); 558 return retval; 559 } 560 } 561 562 /* 563 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also 564 * do this on HNP Dev/Host mode switches (done in dev_init and 565 * host_init). 566 */ 567 if (dwc2_is_host_mode(hsotg)) 568 dwc2_init_fs_ls_pclk_sel(hsotg); 569 570 if (hsotg->core_params->i2c_enable > 0) { 571 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); 572 573 /* Program GUSBCFG.OtgUtmiFsSel to I2C */ 574 usbcfg = readl(hsotg->regs + GUSBCFG); 575 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL; 576 writel(usbcfg, hsotg->regs + GUSBCFG); 577 578 /* Program GI2CCTL.I2CEn */ 579 i2cctl = readl(hsotg->regs + GI2CCTL); 580 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK; 581 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT; 582 i2cctl &= ~GI2CCTL_I2CEN; 583 writel(i2cctl, hsotg->regs + GI2CCTL); 584 i2cctl |= GI2CCTL_I2CEN; 585 writel(i2cctl, hsotg->regs + GI2CCTL); 586 } 587 588 return retval; 589 } 590 591 static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 592 { 593 u32 usbcfg; 594 int retval = 0; 595 596 if (!select_phy) 597 return 0; 598 599 usbcfg = readl(hsotg->regs + GUSBCFG); 600 601 /* 602 * HS PHY parameters. These parameters are preserved during soft reset 603 * so only program the first time. Do a soft reset immediately after 604 * setting phyif. 
605 */ 606 switch (hsotg->core_params->phy_type) { 607 case DWC2_PHY_TYPE_PARAM_ULPI: 608 /* ULPI interface */ 609 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); 610 usbcfg |= GUSBCFG_ULPI_UTMI_SEL; 611 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); 612 if (hsotg->core_params->phy_ulpi_ddr > 0) 613 usbcfg |= GUSBCFG_DDRSEL; 614 break; 615 case DWC2_PHY_TYPE_PARAM_UTMI: 616 /* UTMI+ interface */ 617 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); 618 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); 619 if (hsotg->core_params->phy_utmi_width == 16) 620 usbcfg |= GUSBCFG_PHYIF16; 621 break; 622 default: 623 dev_err(hsotg->dev, "FS PHY selected at HS!\n"); 624 break; 625 } 626 627 writel(usbcfg, hsotg->regs + GUSBCFG); 628 629 /* Reset after setting the PHY parameters */ 630 retval = dwc2_core_reset(hsotg); 631 if (retval) { 632 dev_err(hsotg->dev, "%s() Reset failed, aborting", 633 __func__); 634 return retval; 635 } 636 637 return retval; 638 } 639 640 static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 641 { 642 u32 usbcfg; 643 int retval = 0; 644 645 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && 646 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { 647 /* If FS mode with FS PHY */ 648 retval = dwc2_fs_phy_init(hsotg, select_phy); 649 if (retval) 650 return retval; 651 } else { 652 /* High speed PHY */ 653 retval = dwc2_hs_phy_init(hsotg, select_phy); 654 if (retval) 655 return retval; 656 } 657 658 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && 659 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && 660 hsotg->core_params->ulpi_fs_ls > 0) { 661 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); 662 usbcfg = readl(hsotg->regs + GUSBCFG); 663 usbcfg |= GUSBCFG_ULPI_FS_LS; 664 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M; 665 writel(usbcfg, hsotg->regs + GUSBCFG); 666 } else { 667 usbcfg = readl(hsotg->regs + GUSBCFG); 668 usbcfg &= ~GUSBCFG_ULPI_FS_LS; 669 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M; 670 writel(usbcfg, hsotg->regs + GUSBCFG); 671 } 672 673 return retval; 674 } 675 676 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) 677 { 678 u32 ahbcfg = readl(hsotg->regs + GAHBCFG); 679 680 switch (hsotg->hw_params.arch) { 681 case GHWCFG2_EXT_DMA_ARCH: 682 dev_err(hsotg->dev, "External DMA Mode not supported\n"); 683 return -EINVAL; 684 685 case GHWCFG2_INT_DMA_ARCH: 686 dev_dbg(hsotg->dev, "Internal DMA Mode\n"); 687 if (hsotg->core_params->ahbcfg != -1) { 688 ahbcfg &= GAHBCFG_CTRL_MASK; 689 ahbcfg |= hsotg->core_params->ahbcfg & 690 ~GAHBCFG_CTRL_MASK; 691 } 692 break; 693 694 case GHWCFG2_SLAVE_ONLY_ARCH: 695 default: 696 dev_dbg(hsotg->dev, "Slave Only Mode\n"); 697 break; 698 } 699 700 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", 701 hsotg->core_params->dma_enable, 702 hsotg->core_params->dma_desc_enable); 703 704 if (hsotg->core_params->dma_enable > 0) { 705 if (hsotg->core_params->dma_desc_enable > 0) 706 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); 707 else 708 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); 709 } else { 710 dev_dbg(hsotg->dev, "Using Slave mode\n"); 711 hsotg->core_params->dma_desc_enable = 0; 712 } 713 714 if (hsotg->core_params->dma_enable > 0) 715 ahbcfg |= GAHBCFG_DMA_EN; 716 717 writel(ahbcfg, hsotg->regs + GAHBCFG); 718 719 return 0; 720 } 721 722 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) 723 { 724 u32 usbcfg; 725 726 usbcfg = readl(hsotg->regs + GUSBCFG); 727 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP); 728 729 switch (hsotg->hw_params.op_mode) { 730 case 
GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 731 if (hsotg->core_params->otg_cap == 732 DWC2_CAP_PARAM_HNP_SRP_CAPABLE) 733 usbcfg |= GUSBCFG_HNPCAP; 734 if (hsotg->core_params->otg_cap != 735 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) 736 usbcfg |= GUSBCFG_SRPCAP; 737 break; 738 739 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 740 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 741 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 742 if (hsotg->core_params->otg_cap != 743 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) 744 usbcfg |= GUSBCFG_SRPCAP; 745 break; 746 747 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: 748 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: 749 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: 750 default: 751 break; 752 } 753 754 writel(usbcfg, hsotg->regs + GUSBCFG); 755 } 756 757 /** 758 * dwc2_core_init() - Initializes the DWC_otg controller registers and 759 * prepares the core for device mode or host mode operation 760 * 761 * @hsotg: Programming view of the DWC_otg controller 762 * @select_phy: If true then also set the Phy type 763 * @irq: If >= 0, the irq to register 764 */ 765 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq) 766 { 767 u32 usbcfg, otgctl; 768 int retval; 769 770 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 771 772 usbcfg = readl(hsotg->regs + GUSBCFG); 773 774 /* Set ULPI External VBUS bit if needed */ 775 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; 776 if (hsotg->core_params->phy_ulpi_ext_vbus == 777 DWC2_PHY_ULPI_EXTERNAL_VBUS) 778 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; 779 780 /* Set external TS Dline pulsing bit if needed */ 781 usbcfg &= ~GUSBCFG_TERMSELDLPULSE; 782 if (hsotg->core_params->ts_dline > 0) 783 usbcfg |= GUSBCFG_TERMSELDLPULSE; 784 785 writel(usbcfg, hsotg->regs + GUSBCFG); 786 787 /* Reset the Controller */ 788 retval = dwc2_core_reset(hsotg); 789 if (retval) { 790 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", 791 __func__); 792 return retval; 793 } 794 795 /* 796 * This needs to happen in FS mode before any other programming occurs 797 */ 798 retval = dwc2_phy_init(hsotg, select_phy); 799 if (retval) 800 return retval; 801 802 /* Program the GAHBCFG Register */ 803 retval = dwc2_gahbcfg_init(hsotg); 804 if (retval) 805 return retval; 806 807 /* Program the GUSBCFG register */ 808 dwc2_gusbcfg_init(hsotg); 809 810 /* Program the GOTGCTL register */ 811 otgctl = readl(hsotg->regs + GOTGCTL); 812 otgctl &= ~GOTGCTL_OTGVER; 813 if (hsotg->core_params->otg_ver > 0) 814 otgctl |= GOTGCTL_OTGVER; 815 writel(otgctl, hsotg->regs + GOTGCTL); 816 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); 817 818 /* Clear the SRP success bit for FS-I2c */ 819 hsotg->srp_success = 0; 820 821 /* Enable common interrupts */ 822 dwc2_enable_common_interrupts(hsotg); 823 824 /* 825 * Do device or host initialization based on mode during PCD and 826 * HCD initialization 827 */ 828 if (dwc2_is_host_mode(hsotg)) { 829 dev_dbg(hsotg->dev, "Host Mode\n"); 830 hsotg->op_state = OTG_STATE_A_HOST; 831 } else { 832 dev_dbg(hsotg->dev, "Device Mode\n"); 833 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 834 } 835 836 return 0; 837 } 838 839 /** 840 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts 841 * 842 * @hsotg: Programming view of DWC_otg controller 843 */ 844 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg) 845 { 846 u32 intmsk; 847 848 dev_dbg(hsotg->dev, "%s()\n", __func__); 849 850 /* Disable all interrupts */ 851 writel(0, hsotg->regs + GINTMSK); 852 writel(0, hsotg->regs + HAINTMSK); 853 854 /* Enable the common interrupts */ 855 
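	/*
	 * dwc2_enable_common_interrupts() clears any pending GOTGINT and
	 * GINTSTS state and programs GINTMSK with the mode-independent
	 * mask; the host-specific bits are OR'd back in below.
	 */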
	dwc2_enable_common_interrupts(hsotg);

	/* Enable host mode interrupts without disturbing common interrupts */
	intmsk = readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
	writel(intmsk, hsotg->regs + GINTMSK);
}

/**
 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk = readl(hsotg->regs + GINTMSK);

	/* Disable host mode interrupts without disturbing common interrupts */
	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
	writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
 * for systems that have a total fifo depth that is smaller than the default
 * RX + TX fifo size.
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	total_fifo_size = hw->total_fifo_size;
	rxfsiz = params->host_rx_fifo_size;
	nptxfsiz = params->host_nperio_tx_fifo_size;
	ptxfsiz = params->host_perio_tx_fifo_size;

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS (Max Packet Size) for a periodic EP as 1024, and for
	 * non-periodic as 512.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channels.
		 * 2 * ((1024/4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512/4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * min periodic tx fifo depth
		 * (largest packet size*MC)/4
		 * (1024 * 3)/4 = 768
		 */
		ptxfsiz = 768;

		params->host_rx_fifo_size = rxfsiz;
		params->host_nperio_tx_fifo_size = nptxfsiz;
		params->host_perio_tx_fifo_size = ptxfsiz;
	}

	/*
	 * If the summation of RX, NPTX and PTX fifo sizes is still
	 * bigger than the total_fifo_size, then we have a problem.
	 *
	 * We won't be able to allocate as many endpoints. Right now,
	 * we're just printing an error message, but ideally this FIFO
	 * allocation algorithm would be improved in the future.
	 *
	 * FIXME improve this FIFO allocation algorithm.
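	 *
	 * As a worked example with hypothetical values: with 16 host
	 * channels the fallback above gives rxfsiz = 516 + 16 = 532,
	 * nptxfsiz = 256 and ptxfsiz = 768, i.e. 1556 FIFO entries in
	 * total, which must not exceed hw->total_fifo_size for the check
	 * below to pass.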
939 */ 940 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz))) 941 dev_err(hsotg->dev, "invalid fifo sizes\n"); 942 } 943 944 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) 945 { 946 struct dwc2_core_params *params = hsotg->core_params; 947 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; 948 949 if (!params->enable_dynamic_fifo) 950 return; 951 952 dwc2_calculate_dynamic_fifo(hsotg); 953 954 /* Rx FIFO */ 955 grxfsiz = readl(hsotg->regs + GRXFSIZ); 956 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz); 957 grxfsiz &= ~GRXFSIZ_DEPTH_MASK; 958 grxfsiz |= params->host_rx_fifo_size << 959 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK; 960 writel(grxfsiz, hsotg->regs + GRXFSIZ); 961 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ)); 962 963 /* Non-periodic Tx FIFO */ 964 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n", 965 readl(hsotg->regs + GNPTXFSIZ)); 966 nptxfsiz = params->host_nperio_tx_fifo_size << 967 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; 968 nptxfsiz |= params->host_rx_fifo_size << 969 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; 970 writel(nptxfsiz, hsotg->regs + GNPTXFSIZ); 971 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n", 972 readl(hsotg->regs + GNPTXFSIZ)); 973 974 /* Periodic Tx FIFO */ 975 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n", 976 readl(hsotg->regs + HPTXFSIZ)); 977 hptxfsiz = params->host_perio_tx_fifo_size << 978 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; 979 hptxfsiz |= (params->host_rx_fifo_size + 980 params->host_nperio_tx_fifo_size) << 981 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; 982 writel(hptxfsiz, hsotg->regs + HPTXFSIZ); 983 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", 984 readl(hsotg->regs + HPTXFSIZ)); 985 986 if (hsotg->core_params->en_multiple_tx_fifo > 0 && 987 hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { 988 /* 989 * Global DFIFOCFG calculation for Host mode - 990 * include RxFIFO, NPTXFIFO and HPTXFIFO 991 */ 992 dfifocfg = readl(hsotg->regs + GDFIFOCFG); 993 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK; 994 dfifocfg |= (params->host_rx_fifo_size + 995 params->host_nperio_tx_fifo_size + 996 params->host_perio_tx_fifo_size) << 997 GDFIFOCFG_EPINFOBASE_SHIFT & 998 GDFIFOCFG_EPINFOBASE_MASK; 999 writel(dfifocfg, hsotg->regs + GDFIFOCFG); 1000 } 1001 } 1002 1003 /** 1004 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for 1005 * Host mode 1006 * 1007 * @hsotg: Programming view of DWC_otg controller 1008 * 1009 * This function flushes the Tx and Rx FIFOs and flushes any entries in the 1010 * request queues. Host channels are reset to ensure that they are ready for 1011 * performing transfers. 1012 */ 1013 void dwc2_core_host_init(struct dwc2_hsotg *hsotg) 1014 { 1015 u32 hcfg, hfir, otgctl; 1016 1017 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 1018 1019 /* Restart the Phy Clock */ 1020 writel(0, hsotg->regs + PCGCTL); 1021 1022 /* Initialize Host Configuration Register */ 1023 dwc2_init_fs_ls_pclk_sel(hsotg); 1024 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { 1025 hcfg = readl(hsotg->regs + HCFG); 1026 hcfg |= HCFG_FSLSSUPP; 1027 writel(hcfg, hsotg->regs + HCFG); 1028 } 1029 1030 /* 1031 * This bit allows dynamic reloading of the HFIR register during 1032 * runtime. This bit needs to be programmed during initial configuration 1033 * and its value must not be changed during runtime. 
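	 * (The bit referred to here is HFIR_RLDCTRL, which is set below
	 * when the reload_ctl core parameter is enabled.)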
1034 */ 1035 if (hsotg->core_params->reload_ctl > 0) { 1036 hfir = readl(hsotg->regs + HFIR); 1037 hfir |= HFIR_RLDCTRL; 1038 writel(hfir, hsotg->regs + HFIR); 1039 } 1040 1041 if (hsotg->core_params->dma_desc_enable > 0) { 1042 u32 op_mode = hsotg->hw_params.op_mode; 1043 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || 1044 !hsotg->hw_params.dma_desc_enable || 1045 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE || 1046 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE || 1047 op_mode == GHWCFG2_OP_MODE_UNDEFINED) { 1048 dev_err(hsotg->dev, 1049 "Hardware does not support descriptor DMA mode -\n"); 1050 dev_err(hsotg->dev, 1051 "falling back to buffer DMA mode.\n"); 1052 hsotg->core_params->dma_desc_enable = 0; 1053 } else { 1054 hcfg = readl(hsotg->regs + HCFG); 1055 hcfg |= HCFG_DESCDMA; 1056 writel(hcfg, hsotg->regs + HCFG); 1057 } 1058 } 1059 1060 /* Configure data FIFO sizes */ 1061 dwc2_config_fifos(hsotg); 1062 1063 /* TODO - check this */ 1064 /* Clear Host Set HNP Enable in the OTG Control Register */ 1065 otgctl = readl(hsotg->regs + GOTGCTL); 1066 otgctl &= ~GOTGCTL_HSTSETHNPEN; 1067 writel(otgctl, hsotg->regs + GOTGCTL); 1068 1069 /* Make sure the FIFOs are flushed */ 1070 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */); 1071 dwc2_flush_rx_fifo(hsotg); 1072 1073 /* Clear Host Set HNP Enable in the OTG Control Register */ 1074 otgctl = readl(hsotg->regs + GOTGCTL); 1075 otgctl &= ~GOTGCTL_HSTSETHNPEN; 1076 writel(otgctl, hsotg->regs + GOTGCTL); 1077 1078 if (hsotg->core_params->dma_desc_enable <= 0) { 1079 int num_channels, i; 1080 u32 hcchar; 1081 1082 /* Flush out any leftover queued requests */ 1083 num_channels = hsotg->core_params->host_channels; 1084 for (i = 0; i < num_channels; i++) { 1085 hcchar = readl(hsotg->regs + HCCHAR(i)); 1086 hcchar &= ~HCCHAR_CHENA; 1087 hcchar |= HCCHAR_CHDIS; 1088 hcchar &= ~HCCHAR_EPDIR; 1089 writel(hcchar, hsotg->regs + HCCHAR(i)); 1090 } 1091 1092 /* Halt all channels to put them into a known state */ 1093 for (i = 0; i < num_channels; i++) { 1094 int count = 0; 1095 1096 hcchar = readl(hsotg->regs + HCCHAR(i)); 1097 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS; 1098 hcchar &= ~HCCHAR_EPDIR; 1099 writel(hcchar, hsotg->regs + HCCHAR(i)); 1100 dev_dbg(hsotg->dev, "%s: Halt channel %d\n", 1101 __func__, i); 1102 do { 1103 hcchar = readl(hsotg->regs + HCCHAR(i)); 1104 if (++count > 1000) { 1105 dev_err(hsotg->dev, 1106 "Unable to clear enable on channel %d\n", 1107 i); 1108 break; 1109 } 1110 udelay(1); 1111 } while (hcchar & HCCHAR_CHENA); 1112 } 1113 } 1114 1115 /* Turn on the vbus power */ 1116 dev_dbg(hsotg->dev, "Init: Port Power? 
op_state=%d\n", hsotg->op_state); 1117 if (hsotg->op_state == OTG_STATE_A_HOST) { 1118 u32 hprt0 = dwc2_read_hprt0(hsotg); 1119 1120 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n", 1121 !!(hprt0 & HPRT0_PWR)); 1122 if (!(hprt0 & HPRT0_PWR)) { 1123 hprt0 |= HPRT0_PWR; 1124 writel(hprt0, hsotg->regs + HPRT0); 1125 } 1126 } 1127 1128 dwc2_enable_host_interrupts(hsotg); 1129 } 1130 1131 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg, 1132 struct dwc2_host_chan *chan) 1133 { 1134 u32 hcintmsk = HCINTMSK_CHHLTD; 1135 1136 switch (chan->ep_type) { 1137 case USB_ENDPOINT_XFER_CONTROL: 1138 case USB_ENDPOINT_XFER_BULK: 1139 dev_vdbg(hsotg->dev, "control/bulk\n"); 1140 hcintmsk |= HCINTMSK_XFERCOMPL; 1141 hcintmsk |= HCINTMSK_STALL; 1142 hcintmsk |= HCINTMSK_XACTERR; 1143 hcintmsk |= HCINTMSK_DATATGLERR; 1144 if (chan->ep_is_in) { 1145 hcintmsk |= HCINTMSK_BBLERR; 1146 } else { 1147 hcintmsk |= HCINTMSK_NAK; 1148 hcintmsk |= HCINTMSK_NYET; 1149 if (chan->do_ping) 1150 hcintmsk |= HCINTMSK_ACK; 1151 } 1152 1153 if (chan->do_split) { 1154 hcintmsk |= HCINTMSK_NAK; 1155 if (chan->complete_split) 1156 hcintmsk |= HCINTMSK_NYET; 1157 else 1158 hcintmsk |= HCINTMSK_ACK; 1159 } 1160 1161 if (chan->error_state) 1162 hcintmsk |= HCINTMSK_ACK; 1163 break; 1164 1165 case USB_ENDPOINT_XFER_INT: 1166 if (dbg_perio()) 1167 dev_vdbg(hsotg->dev, "intr\n"); 1168 hcintmsk |= HCINTMSK_XFERCOMPL; 1169 hcintmsk |= HCINTMSK_NAK; 1170 hcintmsk |= HCINTMSK_STALL; 1171 hcintmsk |= HCINTMSK_XACTERR; 1172 hcintmsk |= HCINTMSK_DATATGLERR; 1173 hcintmsk |= HCINTMSK_FRMOVRUN; 1174 1175 if (chan->ep_is_in) 1176 hcintmsk |= HCINTMSK_BBLERR; 1177 if (chan->error_state) 1178 hcintmsk |= HCINTMSK_ACK; 1179 if (chan->do_split) { 1180 if (chan->complete_split) 1181 hcintmsk |= HCINTMSK_NYET; 1182 else 1183 hcintmsk |= HCINTMSK_ACK; 1184 } 1185 break; 1186 1187 case USB_ENDPOINT_XFER_ISOC: 1188 if (dbg_perio()) 1189 dev_vdbg(hsotg->dev, "isoc\n"); 1190 hcintmsk |= HCINTMSK_XFERCOMPL; 1191 hcintmsk |= HCINTMSK_FRMOVRUN; 1192 hcintmsk |= HCINTMSK_ACK; 1193 1194 if (chan->ep_is_in) { 1195 hcintmsk |= HCINTMSK_XACTERR; 1196 hcintmsk |= HCINTMSK_BBLERR; 1197 } 1198 break; 1199 default: 1200 dev_err(hsotg->dev, "## Unknown EP type ##\n"); 1201 break; 1202 } 1203 1204 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); 1205 if (dbg_hc(chan)) 1206 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); 1207 } 1208 1209 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg, 1210 struct dwc2_host_chan *chan) 1211 { 1212 u32 hcintmsk = HCINTMSK_CHHLTD; 1213 1214 /* 1215 * For Descriptor DMA mode core halts the channel on AHB error. 1216 * Interrupt is not required. 
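	 * In Buffer DMA mode the AHB error interrupt is left unmasked
	 * below, so an AHB error on this channel still raises a channel
	 * interrupt.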
1217 */ 1218 if (hsotg->core_params->dma_desc_enable <= 0) { 1219 if (dbg_hc(chan)) 1220 dev_vdbg(hsotg->dev, "desc DMA disabled\n"); 1221 hcintmsk |= HCINTMSK_AHBERR; 1222 } else { 1223 if (dbg_hc(chan)) 1224 dev_vdbg(hsotg->dev, "desc DMA enabled\n"); 1225 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1226 hcintmsk |= HCINTMSK_XFERCOMPL; 1227 } 1228 1229 if (chan->error_state && !chan->do_split && 1230 chan->ep_type != USB_ENDPOINT_XFER_ISOC) { 1231 if (dbg_hc(chan)) 1232 dev_vdbg(hsotg->dev, "setting ACK\n"); 1233 hcintmsk |= HCINTMSK_ACK; 1234 if (chan->ep_is_in) { 1235 hcintmsk |= HCINTMSK_DATATGLERR; 1236 if (chan->ep_type != USB_ENDPOINT_XFER_INT) 1237 hcintmsk |= HCINTMSK_NAK; 1238 } 1239 } 1240 1241 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); 1242 if (dbg_hc(chan)) 1243 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); 1244 } 1245 1246 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg, 1247 struct dwc2_host_chan *chan) 1248 { 1249 u32 intmsk; 1250 1251 if (hsotg->core_params->dma_enable > 0) { 1252 if (dbg_hc(chan)) 1253 dev_vdbg(hsotg->dev, "DMA enabled\n"); 1254 dwc2_hc_enable_dma_ints(hsotg, chan); 1255 } else { 1256 if (dbg_hc(chan)) 1257 dev_vdbg(hsotg->dev, "DMA disabled\n"); 1258 dwc2_hc_enable_slave_ints(hsotg, chan); 1259 } 1260 1261 /* Enable the top level host channel interrupt */ 1262 intmsk = readl(hsotg->regs + HAINTMSK); 1263 intmsk |= 1 << chan->hc_num; 1264 writel(intmsk, hsotg->regs + HAINTMSK); 1265 if (dbg_hc(chan)) 1266 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk); 1267 1268 /* Make sure host channel interrupts are enabled */ 1269 intmsk = readl(hsotg->regs + GINTMSK); 1270 intmsk |= GINTSTS_HCHINT; 1271 writel(intmsk, hsotg->regs + GINTMSK); 1272 if (dbg_hc(chan)) 1273 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk); 1274 } 1275 1276 /** 1277 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from 1278 * a specific endpoint 1279 * 1280 * @hsotg: Programming view of DWC_otg controller 1281 * @chan: Information needed to initialize the host channel 1282 * 1283 * The HCCHARn register is set up with the characteristics specified in chan. 1284 * Host channel interrupts that may need to be serviced while this transfer is 1285 * in progress are enabled. 
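 *
 * For example (hypothetical values), a bulk IN endpoint 2 on device
 * address 1 with a 512-byte maximum packet size sets HCCHAR_EPDIR and
 * programs DevAddr = 1, EPNum = 2, EPType = bulk and MPS = 512; the
 * value written is reported by the dev_vdbg() trace below.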
1286 */ 1287 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) 1288 { 1289 u8 hc_num = chan->hc_num; 1290 u32 hcintmsk; 1291 u32 hcchar; 1292 u32 hcsplt = 0; 1293 1294 if (dbg_hc(chan)) 1295 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1296 1297 /* Clear old interrupt conditions for this host channel */ 1298 hcintmsk = 0xffffffff; 1299 hcintmsk &= ~HCINTMSK_RESERVED14_31; 1300 writel(hcintmsk, hsotg->regs + HCINT(hc_num)); 1301 1302 /* Enable channel interrupts required for this transfer */ 1303 dwc2_hc_enable_ints(hsotg, chan); 1304 1305 /* 1306 * Program the HCCHARn register with the endpoint characteristics for 1307 * the current transfer 1308 */ 1309 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK; 1310 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK; 1311 if (chan->ep_is_in) 1312 hcchar |= HCCHAR_EPDIR; 1313 if (chan->speed == USB_SPEED_LOW) 1314 hcchar |= HCCHAR_LSPDDEV; 1315 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK; 1316 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK; 1317 writel(hcchar, hsotg->regs + HCCHAR(hc_num)); 1318 if (dbg_hc(chan)) { 1319 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n", 1320 hc_num, hcchar); 1321 1322 dev_vdbg(hsotg->dev, "%s: Channel %d\n", 1323 __func__, hc_num); 1324 dev_vdbg(hsotg->dev, " Dev Addr: %d\n", 1325 chan->dev_addr); 1326 dev_vdbg(hsotg->dev, " Ep Num: %d\n", 1327 chan->ep_num); 1328 dev_vdbg(hsotg->dev, " Is In: %d\n", 1329 chan->ep_is_in); 1330 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n", 1331 chan->speed == USB_SPEED_LOW); 1332 dev_vdbg(hsotg->dev, " Ep Type: %d\n", 1333 chan->ep_type); 1334 dev_vdbg(hsotg->dev, " Max Pkt: %d\n", 1335 chan->max_packet); 1336 } 1337 1338 /* Program the HCSPLT register for SPLITs */ 1339 if (chan->do_split) { 1340 if (dbg_hc(chan)) 1341 dev_vdbg(hsotg->dev, 1342 "Programming HC %d with split --> %s\n", 1343 hc_num, 1344 chan->complete_split ? "CSPLIT" : "SSPLIT"); 1345 if (chan->complete_split) 1346 hcsplt |= HCSPLT_COMPSPLT; 1347 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT & 1348 HCSPLT_XACTPOS_MASK; 1349 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT & 1350 HCSPLT_HUBADDR_MASK; 1351 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT & 1352 HCSPLT_PRTADDR_MASK; 1353 if (dbg_hc(chan)) { 1354 dev_vdbg(hsotg->dev, " comp split %d\n", 1355 chan->complete_split); 1356 dev_vdbg(hsotg->dev, " xact pos %d\n", 1357 chan->xact_pos); 1358 dev_vdbg(hsotg->dev, " hub addr %d\n", 1359 chan->hub_addr); 1360 dev_vdbg(hsotg->dev, " hub port %d\n", 1361 chan->hub_port); 1362 dev_vdbg(hsotg->dev, " is_in %d\n", 1363 chan->ep_is_in); 1364 dev_vdbg(hsotg->dev, " Max Pkt %d\n", 1365 chan->max_packet); 1366 dev_vdbg(hsotg->dev, " xferlen %d\n", 1367 chan->xfer_len); 1368 } 1369 } 1370 1371 writel(hcsplt, hsotg->regs + HCSPLT(hc_num)); 1372 } 1373 1374 /** 1375 * dwc2_hc_halt() - Attempts to halt a host channel 1376 * 1377 * @hsotg: Controller register interface 1378 * @chan: Host channel to halt 1379 * @halt_status: Reason for halting the channel 1380 * 1381 * This function should only be called in Slave mode or to abort a transfer in 1382 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the 1383 * controller halts the channel when the transfer is complete or a condition 1384 * occurs that requires application intervention. 
1385 * 1386 * In slave mode, checks for a free request queue entry, then sets the Channel 1387 * Enable and Channel Disable bits of the Host Channel Characteristics 1388 * register of the specified channel to intiate the halt. If there is no free 1389 * request queue entry, sets only the Channel Disable bit of the HCCHARn 1390 * register to flush requests for this channel. In the latter case, sets a 1391 * flag to indicate that the host channel needs to be halted when a request 1392 * queue slot is open. 1393 * 1394 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the 1395 * HCCHARn register. The controller ensures there is space in the request 1396 * queue before submitting the halt request. 1397 * 1398 * Some time may elapse before the core flushes any posted requests for this 1399 * host channel and halts. The Channel Halted interrupt handler completes the 1400 * deactivation of the host channel. 1401 */ 1402 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, 1403 enum dwc2_halt_status halt_status) 1404 { 1405 u32 nptxsts, hptxsts, hcchar; 1406 1407 if (dbg_hc(chan)) 1408 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1409 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) 1410 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status); 1411 1412 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE || 1413 halt_status == DWC2_HC_XFER_AHB_ERR) { 1414 /* 1415 * Disable all channel interrupts except Ch Halted. The QTD 1416 * and QH state associated with this transfer has been cleared 1417 * (in the case of URB_DEQUEUE), so the channel needs to be 1418 * shut down carefully to prevent crashes. 1419 */ 1420 u32 hcintmsk = HCINTMSK_CHHLTD; 1421 1422 dev_vdbg(hsotg->dev, "dequeue/error\n"); 1423 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); 1424 1425 /* 1426 * Make sure no other interrupts besides halt are currently 1427 * pending. Handling another interrupt could cause a crash due 1428 * to the QTD and QH state. 1429 */ 1430 writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num)); 1431 1432 /* 1433 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR 1434 * even if the channel was already halted for some other 1435 * reason 1436 */ 1437 chan->halt_status = halt_status; 1438 1439 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num)); 1440 if (!(hcchar & HCCHAR_CHENA)) { 1441 /* 1442 * The channel is either already halted or it hasn't 1443 * started yet. In DMA mode, the transfer may halt if 1444 * it finishes normally or a condition occurs that 1445 * requires driver intervention. Don't want to halt 1446 * the channel again. In either Slave or DMA mode, 1447 * it's possible that the transfer has been assigned 1448 * to a channel, but not started yet when an URB is 1449 * dequeued. Don't want to halt a channel that hasn't 1450 * started yet. 1451 */ 1452 return; 1453 } 1454 } 1455 if (chan->halt_pending) { 1456 /* 1457 * A halt has already been issued for this channel. This might 1458 * happen when a transfer is aborted by a higher level in 1459 * the stack. 
1460 */ 1461 dev_vdbg(hsotg->dev, 1462 "*** %s: Channel %d, chan->halt_pending already set ***\n", 1463 __func__, chan->hc_num); 1464 return; 1465 } 1466 1467 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num)); 1468 1469 /* No need to set the bit in DDMA for disabling the channel */ 1470 /* TODO check it everywhere channel is disabled */ 1471 if (hsotg->core_params->dma_desc_enable <= 0) { 1472 if (dbg_hc(chan)) 1473 dev_vdbg(hsotg->dev, "desc DMA disabled\n"); 1474 hcchar |= HCCHAR_CHENA; 1475 } else { 1476 if (dbg_hc(chan)) 1477 dev_dbg(hsotg->dev, "desc DMA enabled\n"); 1478 } 1479 hcchar |= HCCHAR_CHDIS; 1480 1481 if (hsotg->core_params->dma_enable <= 0) { 1482 if (dbg_hc(chan)) 1483 dev_vdbg(hsotg->dev, "DMA not enabled\n"); 1484 hcchar |= HCCHAR_CHENA; 1485 1486 /* Check for space in the request queue to issue the halt */ 1487 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || 1488 chan->ep_type == USB_ENDPOINT_XFER_BULK) { 1489 dev_vdbg(hsotg->dev, "control/bulk\n"); 1490 nptxsts = readl(hsotg->regs + GNPTXSTS); 1491 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) { 1492 dev_vdbg(hsotg->dev, "Disabling channel\n"); 1493 hcchar &= ~HCCHAR_CHENA; 1494 } 1495 } else { 1496 if (dbg_perio()) 1497 dev_vdbg(hsotg->dev, "isoc/intr\n"); 1498 hptxsts = readl(hsotg->regs + HPTXSTS); 1499 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 || 1500 hsotg->queuing_high_bandwidth) { 1501 if (dbg_perio()) 1502 dev_vdbg(hsotg->dev, "Disabling channel\n"); 1503 hcchar &= ~HCCHAR_CHENA; 1504 } 1505 } 1506 } else { 1507 if (dbg_hc(chan)) 1508 dev_vdbg(hsotg->dev, "DMA enabled\n"); 1509 } 1510 1511 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); 1512 chan->halt_status = halt_status; 1513 1514 if (hcchar & HCCHAR_CHENA) { 1515 if (dbg_hc(chan)) 1516 dev_vdbg(hsotg->dev, "Channel enabled\n"); 1517 chan->halt_pending = 1; 1518 chan->halt_on_queue = 0; 1519 } else { 1520 if (dbg_hc(chan)) 1521 dev_vdbg(hsotg->dev, "Channel disabled\n"); 1522 chan->halt_on_queue = 1; 1523 } 1524 1525 if (dbg_hc(chan)) { 1526 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 1527 chan->hc_num); 1528 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n", 1529 hcchar); 1530 dev_vdbg(hsotg->dev, " halt_pending: %d\n", 1531 chan->halt_pending); 1532 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n", 1533 chan->halt_on_queue); 1534 dev_vdbg(hsotg->dev, " halt_status: %d\n", 1535 chan->halt_status); 1536 } 1537 } 1538 1539 /** 1540 * dwc2_hc_cleanup() - Clears the transfer state for a host channel 1541 * 1542 * @hsotg: Programming view of DWC_otg controller 1543 * @chan: Identifies the host channel to clean up 1544 * 1545 * This function is normally called after a transfer is done and the host 1546 * channel is being released 1547 */ 1548 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) 1549 { 1550 u32 hcintmsk; 1551 1552 chan->xfer_started = 0; 1553 1554 /* 1555 * Clear channel interrupt enables and any unhandled channel interrupt 1556 * conditions 1557 */ 1558 writel(0, hsotg->regs + HCINTMSK(chan->hc_num)); 1559 hcintmsk = 0xffffffff; 1560 hcintmsk &= ~HCINTMSK_RESERVED14_31; 1561 writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num)); 1562 } 1563 1564 /** 1565 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in 1566 * which frame a periodic transfer should occur 1567 * 1568 * @hsotg: Programming view of DWC_otg controller 1569 * @chan: Identifies the host channel to set up and its properties 1570 * @hcchar: Current value of the HCCHAR register for the specified host channel 1571 * 1572 * This function has no 
effect on non-periodic transfers 1573 */ 1574 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg, 1575 struct dwc2_host_chan *chan, u32 *hcchar) 1576 { 1577 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1578 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1579 /* 1 if _next_ frame is odd, 0 if it's even */ 1580 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1)) 1581 *hcchar |= HCCHAR_ODDFRM; 1582 } 1583 } 1584 1585 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan) 1586 { 1587 /* Set up the initial PID for the transfer */ 1588 if (chan->speed == USB_SPEED_HIGH) { 1589 if (chan->ep_is_in) { 1590 if (chan->multi_count == 1) 1591 chan->data_pid_start = DWC2_HC_PID_DATA0; 1592 else if (chan->multi_count == 2) 1593 chan->data_pid_start = DWC2_HC_PID_DATA1; 1594 else 1595 chan->data_pid_start = DWC2_HC_PID_DATA2; 1596 } else { 1597 if (chan->multi_count == 1) 1598 chan->data_pid_start = DWC2_HC_PID_DATA0; 1599 else 1600 chan->data_pid_start = DWC2_HC_PID_MDATA; 1601 } 1602 } else { 1603 chan->data_pid_start = DWC2_HC_PID_DATA0; 1604 } 1605 } 1606 1607 /** 1608 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with 1609 * the Host Channel 1610 * 1611 * @hsotg: Programming view of DWC_otg controller 1612 * @chan: Information needed to initialize the host channel 1613 * 1614 * This function should only be called in Slave mode. For a channel associated 1615 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel 1616 * associated with a periodic EP, the periodic Tx FIFO is written. 1617 * 1618 * Upon return the xfer_buf and xfer_count fields in chan are incremented by 1619 * the number of bytes written to the Tx FIFO. 1620 */ 1621 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, 1622 struct dwc2_host_chan *chan) 1623 { 1624 u32 i; 1625 u32 remaining_count; 1626 u32 byte_count; 1627 u32 dword_count; 1628 u32 __iomem *data_fifo; 1629 u32 *data_buf = (u32 *)chan->xfer_buf; 1630 1631 if (dbg_hc(chan)) 1632 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1633 1634 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num)); 1635 1636 remaining_count = chan->xfer_len - chan->xfer_count; 1637 if (remaining_count > chan->max_packet) 1638 byte_count = chan->max_packet; 1639 else 1640 byte_count = remaining_count; 1641 1642 dword_count = (byte_count + 3) / 4; 1643 1644 if (((unsigned long)data_buf & 0x3) == 0) { 1645 /* xfer_buf is DWORD aligned */ 1646 for (i = 0; i < dword_count; i++, data_buf++) 1647 writel(*data_buf, data_fifo); 1648 } else { 1649 /* xfer_buf is not DWORD aligned */ 1650 for (i = 0; i < dword_count; i++, data_buf++) { 1651 u32 data = data_buf[0] | data_buf[1] << 8 | 1652 data_buf[2] << 16 | data_buf[3] << 24; 1653 writel(data, data_fifo); 1654 } 1655 } 1656 1657 chan->xfer_count += byte_count; 1658 chan->xfer_buf += byte_count; 1659 } 1660 1661 /** 1662 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host 1663 * channel and starts the transfer 1664 * 1665 * @hsotg: Programming view of DWC_otg controller 1666 * @chan: Information needed to initialize the host channel. The xfer_len value 1667 * may be reduced to accommodate the max widths of the XferSize and 1668 * PktCnt fields in the HCTSIZn register. The multi_count value may be 1669 * changed to reflect the final xfer_len value. 1670 * 1671 * This function may be called in either Slave mode or DMA mode. In Slave mode, 1672 * the caller must ensure that there is sufficient space in the request queue 1673 * and Tx Data FIFO. 
1674 * 1675 * For an OUT transfer in Slave mode, it loads a data packet into the 1676 * appropriate FIFO. If necessary, additional data packets are loaded in the 1677 * Host ISR. 1678 * 1679 * For an IN transfer in Slave mode, a data packet is requested. The data 1680 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, 1681 * additional data packets are requested in the Host ISR. 1682 * 1683 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ 1684 * register along with a packet count of 1 and the channel is enabled. This 1685 * causes a single PING transaction to occur. Other fields in HCTSIZ are 1686 * simply set to 0 since no data transfer occurs in this case. 1687 * 1688 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with 1689 * all the information required to perform the subsequent data transfer. In 1690 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the 1691 * controller performs the entire PING protocol, then starts the data 1692 * transfer. 1693 */ 1694 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, 1695 struct dwc2_host_chan *chan) 1696 { 1697 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size; 1698 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count; 1699 u32 hcchar; 1700 u32 hctsiz = 0; 1701 u16 num_packets; 1702 1703 if (dbg_hc(chan)) 1704 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1705 1706 if (chan->do_ping) { 1707 if (hsotg->core_params->dma_enable <= 0) { 1708 if (dbg_hc(chan)) 1709 dev_vdbg(hsotg->dev, "ping, no DMA\n"); 1710 dwc2_hc_do_ping(hsotg, chan); 1711 chan->xfer_started = 1; 1712 return; 1713 } else { 1714 if (dbg_hc(chan)) 1715 dev_vdbg(hsotg->dev, "ping, DMA\n"); 1716 hctsiz |= TSIZ_DOPNG; 1717 } 1718 } 1719 1720 if (chan->do_split) { 1721 if (dbg_hc(chan)) 1722 dev_vdbg(hsotg->dev, "split\n"); 1723 num_packets = 1; 1724 1725 if (chan->complete_split && !chan->ep_is_in) 1726 /* 1727 * For CSPLIT OUT Transfer, set the size to 0 so the 1728 * core doesn't expect any data written to the FIFO 1729 */ 1730 chan->xfer_len = 0; 1731 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet) 1732 chan->xfer_len = chan->max_packet; 1733 else if (!chan->ep_is_in && chan->xfer_len > 188) 1734 chan->xfer_len = 188; 1735 1736 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & 1737 TSIZ_XFERSIZE_MASK; 1738 } else { 1739 if (dbg_hc(chan)) 1740 dev_vdbg(hsotg->dev, "no split\n"); 1741 /* 1742 * Ensure that the transfer length and packet count will fit 1743 * in the widths allocated for them in the HCTSIZn register 1744 */ 1745 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1746 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1747 /* 1748 * Make sure the transfer size is no larger than one 1749 * (micro)frame's worth of data. (A check was done 1750 * when the periodic transfer was accepted to ensure 1751 * that a (micro)frame's worth of data can be 1752 * programmed into a channel.) 
1753 */ 1754 u32 max_periodic_len = 1755 chan->multi_count * chan->max_packet; 1756 1757 if (chan->xfer_len > max_periodic_len) 1758 chan->xfer_len = max_periodic_len; 1759 } else if (chan->xfer_len > max_hc_xfer_size) { 1760 /* 1761 * Make sure that xfer_len is a multiple of max packet 1762 * size 1763 */ 1764 chan->xfer_len = 1765 max_hc_xfer_size - chan->max_packet + 1; 1766 } 1767 1768 if (chan->xfer_len > 0) { 1769 num_packets = (chan->xfer_len + chan->max_packet - 1) / 1770 chan->max_packet; 1771 if (num_packets > max_hc_pkt_count) { 1772 num_packets = max_hc_pkt_count; 1773 chan->xfer_len = num_packets * chan->max_packet; 1774 } 1775 } else { 1776 /* Need 1 packet for transfer length of 0 */ 1777 num_packets = 1; 1778 } 1779 1780 if (chan->ep_is_in) 1781 /* 1782 * Always program an integral # of max packets for IN 1783 * transfers 1784 */ 1785 chan->xfer_len = num_packets * chan->max_packet; 1786 1787 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1788 chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1789 /* 1790 * Make sure that the multi_count field matches the 1791 * actual transfer length 1792 */ 1793 chan->multi_count = num_packets; 1794 1795 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1796 dwc2_set_pid_isoc(chan); 1797 1798 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & 1799 TSIZ_XFERSIZE_MASK; 1800 } 1801 1802 chan->start_pkt_count = num_packets; 1803 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK; 1804 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & 1805 TSIZ_SC_MC_PID_MASK; 1806 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); 1807 if (dbg_hc(chan)) { 1808 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n", 1809 hctsiz, chan->hc_num); 1810 1811 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 1812 chan->hc_num); 1813 dev_vdbg(hsotg->dev, " Xfer Size: %d\n", 1814 (hctsiz & TSIZ_XFERSIZE_MASK) >> 1815 TSIZ_XFERSIZE_SHIFT); 1816 dev_vdbg(hsotg->dev, " Num Pkts: %d\n", 1817 (hctsiz & TSIZ_PKTCNT_MASK) >> 1818 TSIZ_PKTCNT_SHIFT); 1819 dev_vdbg(hsotg->dev, " Start PID: %d\n", 1820 (hctsiz & TSIZ_SC_MC_PID_MASK) >> 1821 TSIZ_SC_MC_PID_SHIFT); 1822 } 1823 1824 if (hsotg->core_params->dma_enable > 0) { 1825 dma_addr_t dma_addr; 1826 1827 if (chan->align_buf) { 1828 if (dbg_hc(chan)) 1829 dev_vdbg(hsotg->dev, "align_buf\n"); 1830 dma_addr = chan->align_buf; 1831 } else { 1832 dma_addr = chan->xfer_dma; 1833 } 1834 writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num)); 1835 if (dbg_hc(chan)) 1836 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", 1837 (unsigned long)dma_addr, chan->hc_num); 1838 } 1839 1840 /* Start the split */ 1841 if (chan->do_split) { 1842 u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num)); 1843 1844 hcsplt |= HCSPLT_SPLTENA; 1845 writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num)); 1846 } 1847 1848 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num)); 1849 hcchar &= ~HCCHAR_MULTICNT_MASK; 1850 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & 1851 HCCHAR_MULTICNT_MASK; 1852 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); 1853 1854 if (hcchar & HCCHAR_CHDIS) 1855 dev_warn(hsotg->dev, 1856 "%s: chdis set, channel %d, hcchar 0x%08x\n", 1857 __func__, chan->hc_num, hcchar); 1858 1859 /* Set host channel enable after all other setup is complete */ 1860 hcchar |= HCCHAR_CHENA; 1861 hcchar &= ~HCCHAR_CHDIS; 1862 1863 if (dbg_hc(chan)) 1864 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", 1865 (hcchar & HCCHAR_MULTICNT_MASK) >> 1866 HCCHAR_MULTICNT_SHIFT); 1867 1868 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); 1869 if (dbg_hc(chan)) 1870 
dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, 1871 chan->hc_num); 1872 1873 chan->xfer_started = 1; 1874 chan->requests++; 1875 1876 if (hsotg->core_params->dma_enable <= 0 && 1877 !chan->ep_is_in && chan->xfer_len > 0) 1878 /* Load OUT packet into the appropriate Tx FIFO */ 1879 dwc2_hc_write_packet(hsotg, chan); 1880 } 1881 1882 /** 1883 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a 1884 * host channel and starts the transfer in Descriptor DMA mode 1885 * 1886 * @hsotg: Programming view of DWC_otg controller 1887 * @chan: Information needed to initialize the host channel 1888 * 1889 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set. 1890 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field 1891 * with micro-frame bitmap. 1892 * 1893 * Initializes HCDMA register with descriptor list address and CTD value then 1894 * starts the transfer via enabling the channel. 1895 */ 1896 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, 1897 struct dwc2_host_chan *chan) 1898 { 1899 u32 hcchar; 1900 u32 hc_dma; 1901 u32 hctsiz = 0; 1902 1903 if (chan->do_ping) 1904 hctsiz |= TSIZ_DOPNG; 1905 1906 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1907 dwc2_set_pid_isoc(chan); 1908 1909 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */ 1910 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & 1911 TSIZ_SC_MC_PID_MASK; 1912 1913 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */ 1914 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK; 1915 1916 /* Non-zero only for high-speed interrupt endpoints */ 1917 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK; 1918 1919 if (dbg_hc(chan)) { 1920 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 1921 chan->hc_num); 1922 dev_vdbg(hsotg->dev, " Start PID: %d\n", 1923 chan->data_pid_start); 1924 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1); 1925 } 1926 1927 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); 1928 1929 hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK; 1930 1931 /* Always start from first descriptor */ 1932 hc_dma &= ~HCDMA_CTD_MASK; 1933 writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num)); 1934 if (dbg_hc(chan)) 1935 dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n", 1936 hc_dma, chan->hc_num); 1937 1938 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num)); 1939 hcchar &= ~HCCHAR_MULTICNT_MASK; 1940 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & 1941 HCCHAR_MULTICNT_MASK; 1942 1943 if (hcchar & HCCHAR_CHDIS) 1944 dev_warn(hsotg->dev, 1945 "%s: chdis set, channel %d, hcchar 0x%08x\n", 1946 __func__, chan->hc_num, hcchar); 1947 1948 /* Set host channel enable after all other setup is complete */ 1949 hcchar |= HCCHAR_CHENA; 1950 hcchar &= ~HCCHAR_CHDIS; 1951 1952 if (dbg_hc(chan)) 1953 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", 1954 (hcchar & HCCHAR_MULTICNT_MASK) >> 1955 HCCHAR_MULTICNT_SHIFT); 1956 1957 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); 1958 if (dbg_hc(chan)) 1959 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, 1960 chan->hc_num); 1961 1962 chan->xfer_started = 1; 1963 chan->requests++; 1964 } 1965 1966 /** 1967 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by 1968 * a previous call to dwc2_hc_start_transfer() 1969 * 1970 * @hsotg: Programming view of DWC_otg controller 1971 * @chan: Information needed to initialize the host channel 1972 * 1973 * The caller must ensure there is sufficient space in the request queue and Tx 1974 * Data FIFO. 
This function should only be called in Slave mode. In DMA mode, 1975 * the controller acts autonomously to complete transfers programmed to a host 1976 * channel. 1977 * 1978 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO 1979 * if there is any data remaining to be queued. For an IN transfer, another 1980 * data packet is always requested. For the SETUP phase of a control transfer, 1981 * this function does nothing. 1982 * 1983 * Return: 1 if a new request is queued, 0 if no more requests are required 1984 * for this transfer 1985 */ 1986 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg, 1987 struct dwc2_host_chan *chan) 1988 { 1989 if (dbg_hc(chan)) 1990 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 1991 chan->hc_num); 1992 1993 if (chan->do_split) 1994 /* SPLITs always queue just once per channel */ 1995 return 0; 1996 1997 if (chan->data_pid_start == DWC2_HC_PID_SETUP) 1998 /* SETUPs are queued only once since they can't be NAK'd */ 1999 return 0; 2000 2001 if (chan->ep_is_in) { 2002 /* 2003 * Always queue another request for other IN transfers. If 2004 * back-to-back INs are issued and NAKs are received for both, 2005 * the driver may still be processing the first NAK when the 2006 * second NAK is received. When the interrupt handler clears 2007 * the NAK interrupt for the first NAK, the second NAK will 2008 * not be seen. So we can't depend on the NAK interrupt 2009 * handler to requeue a NAK'd request. Instead, IN requests 2010 * are issued each time this function is called. When the 2011 * transfer completes, the extra requests for the channel will 2012 * be flushed. 2013 */ 2014 u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num)); 2015 2016 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); 2017 hcchar |= HCCHAR_CHENA; 2018 hcchar &= ~HCCHAR_CHDIS; 2019 if (dbg_hc(chan)) 2020 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n", 2021 hcchar); 2022 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); 2023 chan->requests++; 2024 return 1; 2025 } 2026 2027 /* OUT transfers */ 2028 2029 if (chan->xfer_count < chan->xfer_len) { 2030 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 2031 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 2032 u32 hcchar = readl(hsotg->regs + 2033 HCCHAR(chan->hc_num)); 2034 2035 dwc2_hc_set_even_odd_frame(hsotg, chan, 2036 &hcchar); 2037 } 2038 2039 /* Load OUT packet into the appropriate Tx FIFO */ 2040 dwc2_hc_write_packet(hsotg, chan); 2041 chan->requests++; 2042 return 1; 2043 } 2044 2045 return 0; 2046 } 2047 2048 /** 2049 * dwc2_hc_do_ping() - Starts a PING transfer 2050 * 2051 * @hsotg: Programming view of DWC_otg controller 2052 * @chan: Information needed to initialize the host channel 2053 * 2054 * This function should only be called in Slave mode. The Do Ping bit is set in 2055 * the HCTSIZ register, then the channel is enabled. 
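 * PING is the USB 2.0 high-speed flow-control handshake: the host keeps issuing PING tokens until the device responds with ACK, indicating it has room for the next OUT (or control data) packet.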
2056 */ 2057 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) 2058 { 2059 u32 hcchar; 2060 u32 hctsiz; 2061 2062 if (dbg_hc(chan)) 2063 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 2064 chan->hc_num); 2065 2066 2067 hctsiz = TSIZ_DOPNG; 2068 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT; 2069 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); 2070 2071 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num)); 2072 hcchar |= HCCHAR_CHENA; 2073 hcchar &= ~HCCHAR_CHDIS; 2074 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); 2075 } 2076 2077 /** 2078 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for 2079 * the HFIR register according to PHY type and speed 2080 * 2081 * @hsotg: Programming view of DWC_otg controller 2082 * 2083 * NOTE: The caller can modify the value of the HFIR register only after the 2084 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort) 2085 * has been set 2086 */ 2087 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg) 2088 { 2089 u32 usbcfg; 2090 u32 hprt0; 2091 int clock = 60; /* default value */ 2092 2093 usbcfg = readl(hsotg->regs + GUSBCFG); 2094 hprt0 = readl(hsotg->regs + HPRT0); 2095 2096 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) && 2097 !(usbcfg & GUSBCFG_PHYIF16)) 2098 clock = 60; 2099 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type == 2100 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI) 2101 clock = 48; 2102 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && 2103 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) 2104 clock = 30; 2105 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && 2106 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16)) 2107 clock = 60; 2108 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && 2109 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) 2110 clock = 48; 2111 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) && 2112 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI) 2113 clock = 48; 2114 if ((usbcfg & GUSBCFG_PHYSEL) && 2115 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 2116 clock = 48; 2117 2118 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED) 2119 /* High speed case */ 2120 return 125 * clock; 2121 else 2122 /* FS/LS case */ 2123 return 1000 * clock; 2124 } 2125 2126 /** 2127 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination 2128 * buffer 2129 * 2130 * @hsotg: Programming view of DWC_otg controller 2131 * @dest: Destination buffer for the packet 2132 * @bytes: Number of bytes to copy to the destination 2133 */ 2134 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes) 2135 { 2136 u32 __iomem *fifo = hsotg->regs + HCFIFO(0); 2137 u32 *data_buf = (u32 *)dest; 2138 int word_count = (bytes + 3) / 4; 2139 int i; 2140 2141 /* 2142 * Todo: Account for the case where dest is not dword aligned. This 2143 * requires reading data from the FIFO into a u32 temp buffer, then 2144 * moving it into the data buffer.
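 * One possible approach (a sketch, not implemented here): read each word into a local u32 with readl() and memcpy() the valid bytes into dest, which keeps the FIFO accesses word-sized while tolerating an unaligned destination buffer.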
2145 */ 2146 2147 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes); 2148 2149 for (i = 0; i < word_count; i++, data_buf++) 2150 *data_buf = readl(fifo); 2151 } 2152 2153 /** 2154 * dwc2_dump_host_registers() - Prints the host registers 2155 * 2156 * @hsotg: Programming view of DWC_otg controller 2157 * 2158 * NOTE: This function will be removed once the peripheral controller code 2159 * is integrated and the driver is stable 2160 */ 2161 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg) 2162 { 2163 #ifdef DEBUG 2164 u32 __iomem *addr; 2165 int i; 2166 2167 dev_dbg(hsotg->dev, "Host Global Registers\n"); 2168 addr = hsotg->regs + HCFG; 2169 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n", 2170 (unsigned long)addr, readl(addr)); 2171 addr = hsotg->regs + HFIR; 2172 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n", 2173 (unsigned long)addr, readl(addr)); 2174 addr = hsotg->regs + HFNUM; 2175 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n", 2176 (unsigned long)addr, readl(addr)); 2177 addr = hsotg->regs + HPTXSTS; 2178 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n", 2179 (unsigned long)addr, readl(addr)); 2180 addr = hsotg->regs + HAINT; 2181 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n", 2182 (unsigned long)addr, readl(addr)); 2183 addr = hsotg->regs + HAINTMSK; 2184 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n", 2185 (unsigned long)addr, readl(addr)); 2186 if (hsotg->core_params->dma_desc_enable > 0) { 2187 addr = hsotg->regs + HFLBADDR; 2188 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n", 2189 (unsigned long)addr, readl(addr)); 2190 } 2191 2192 addr = hsotg->regs + HPRT0; 2193 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n", 2194 (unsigned long)addr, readl(addr)); 2195 2196 for (i = 0; i < hsotg->core_params->host_channels; i++) { 2197 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i); 2198 addr = hsotg->regs + HCCHAR(i); 2199 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n", 2200 (unsigned long)addr, readl(addr)); 2201 addr = hsotg->regs + HCSPLT(i); 2202 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n", 2203 (unsigned long)addr, readl(addr)); 2204 addr = hsotg->regs + HCINT(i); 2205 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n", 2206 (unsigned long)addr, readl(addr)); 2207 addr = hsotg->regs + HCINTMSK(i); 2208 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n", 2209 (unsigned long)addr, readl(addr)); 2210 addr = hsotg->regs + HCTSIZ(i); 2211 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n", 2212 (unsigned long)addr, readl(addr)); 2213 addr = hsotg->regs + HCDMA(i); 2214 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n", 2215 (unsigned long)addr, readl(addr)); 2216 if (hsotg->core_params->dma_desc_enable > 0) { 2217 addr = hsotg->regs + HCDMAB(i); 2218 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n", 2219 (unsigned long)addr, readl(addr)); 2220 } 2221 } 2222 #endif 2223 } 2224 2225 /** 2226 * dwc2_dump_global_registers() - Prints the core global registers 2227 * 2228 * @hsotg: Programming view of DWC_otg controller 2229 * 2230 * NOTE: This function will be removed once the peripheral controller code 2231 * is integrated and the driver is stable 2232 */ 2233 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg) 2234 { 2235 #ifdef DEBUG 2236 u32 __iomem *addr; 2237 2238 dev_dbg(hsotg->dev, "Core Global Registers\n"); 2239 addr = hsotg->regs + GOTGCTL; 2240 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n", 2241 (unsigned long)addr, readl(addr)); 2242 addr = hsotg->regs + GOTGINT; 2243 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n", 
2244 (unsigned long)addr, readl(addr)); 2245 addr = hsotg->regs + GAHBCFG; 2246 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n", 2247 (unsigned long)addr, readl(addr)); 2248 addr = hsotg->regs + GUSBCFG; 2249 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n", 2250 (unsigned long)addr, readl(addr)); 2251 addr = hsotg->regs + GRSTCTL; 2252 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n", 2253 (unsigned long)addr, readl(addr)); 2254 addr = hsotg->regs + GINTSTS; 2255 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n", 2256 (unsigned long)addr, readl(addr)); 2257 addr = hsotg->regs + GINTMSK; 2258 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n", 2259 (unsigned long)addr, readl(addr)); 2260 addr = hsotg->regs + GRXSTSR; 2261 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n", 2262 (unsigned long)addr, readl(addr)); 2263 addr = hsotg->regs + GRXFSIZ; 2264 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n", 2265 (unsigned long)addr, readl(addr)); 2266 addr = hsotg->regs + GNPTXFSIZ; 2267 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n", 2268 (unsigned long)addr, readl(addr)); 2269 addr = hsotg->regs + GNPTXSTS; 2270 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n", 2271 (unsigned long)addr, readl(addr)); 2272 addr = hsotg->regs + GI2CCTL; 2273 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n", 2274 (unsigned long)addr, readl(addr)); 2275 addr = hsotg->regs + GPVNDCTL; 2276 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n", 2277 (unsigned long)addr, readl(addr)); 2278 addr = hsotg->regs + GGPIO; 2279 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n", 2280 (unsigned long)addr, readl(addr)); 2281 addr = hsotg->regs + GUID; 2282 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n", 2283 (unsigned long)addr, readl(addr)); 2284 addr = hsotg->regs + GSNPSID; 2285 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n", 2286 (unsigned long)addr, readl(addr)); 2287 addr = hsotg->regs + GHWCFG1; 2288 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n", 2289 (unsigned long)addr, readl(addr)); 2290 addr = hsotg->regs + GHWCFG2; 2291 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n", 2292 (unsigned long)addr, readl(addr)); 2293 addr = hsotg->regs + GHWCFG3; 2294 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n", 2295 (unsigned long)addr, readl(addr)); 2296 addr = hsotg->regs + GHWCFG4; 2297 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n", 2298 (unsigned long)addr, readl(addr)); 2299 addr = hsotg->regs + GLPMCFG; 2300 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n", 2301 (unsigned long)addr, readl(addr)); 2302 addr = hsotg->regs + GPWRDN; 2303 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n", 2304 (unsigned long)addr, readl(addr)); 2305 addr = hsotg->regs + GDFIFOCFG; 2306 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n", 2307 (unsigned long)addr, readl(addr)); 2308 addr = hsotg->regs + HPTXFSIZ; 2309 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n", 2310 (unsigned long)addr, readl(addr)); 2311 2312 addr = hsotg->regs + PCGCTL; 2313 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n", 2314 (unsigned long)addr, readl(addr)); 2315 #endif 2316 } 2317 2318 /** 2319 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO 2320 * 2321 * @hsotg: Programming view of DWC_otg controller 2322 * @num: Tx FIFO to flush 2323 */ 2324 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num) 2325 { 2326 u32 greset; 2327 int count = 0; 2328 2329 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num); 2330 2331 greset = GRSTCTL_TXFFLSH; 2332 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK; 2333 writel(greset, hsotg->regs + 
GRSTCTL); 2334 2335 do { 2336 greset = readl(hsotg->regs + GRSTCTL); 2337 if (++count > 10000) { 2338 dev_warn(hsotg->dev, 2339 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", 2340 __func__, greset, 2341 readl(hsotg->regs + GNPTXSTS)); 2342 break; 2343 } 2344 udelay(1); 2345 } while (greset & GRSTCTL_TXFFLSH); 2346 2347 /* Wait for at least 3 PHY Clocks */ 2348 udelay(1); 2349 } 2350 2351 /** 2352 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO 2353 * 2354 * @hsotg: Programming view of DWC_otg controller 2355 */ 2356 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg) 2357 { 2358 u32 greset; 2359 int count = 0; 2360 2361 dev_vdbg(hsotg->dev, "%s()\n", __func__); 2362 2363 greset = GRSTCTL_RXFFLSH; 2364 writel(greset, hsotg->regs + GRSTCTL); 2365 2366 do { 2367 greset = readl(hsotg->regs + GRSTCTL); 2368 if (++count > 10000) { 2369 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n", 2370 __func__, greset); 2371 break; 2372 } 2373 udelay(1); 2374 } while (greset & GRSTCTL_RXFFLSH); 2375 2376 /* Wait for at least 3 PHY Clocks */ 2377 udelay(1); 2378 } 2379 2380 #define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) 2381 2382 /* Parameter access functions */ 2383 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) 2384 { 2385 int valid = 1; 2386 2387 switch (val) { 2388 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: 2389 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) 2390 valid = 0; 2391 break; 2392 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: 2393 switch (hsotg->hw_params.op_mode) { 2394 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2395 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2396 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2397 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2398 break; 2399 default: 2400 valid = 0; 2401 break; 2402 } 2403 break; 2404 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: 2405 /* always valid */ 2406 break; 2407 default: 2408 valid = 0; 2409 break; 2410 } 2411 2412 if (!valid) { 2413 if (val >= 0) 2414 dev_err(hsotg->dev, 2415 "%d invalid for otg_cap parameter. Check HW configuration.\n", 2416 val); 2417 switch (hsotg->hw_params.op_mode) { 2418 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2419 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; 2420 break; 2421 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2422 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2423 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2424 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; 2425 break; 2426 default: 2427 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 2428 break; 2429 } 2430 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); 2431 } 2432 2433 hsotg->core_params->otg_cap = val; 2434 } 2435 2436 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val) 2437 { 2438 int valid = 1; 2439 2440 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH) 2441 valid = 0; 2442 if (val < 0) 2443 valid = 0; 2444 2445 if (!valid) { 2446 if (val >= 0) 2447 dev_err(hsotg->dev, 2448 "%d invalid for dma_enable parameter. Check HW configuration.\n", 2449 val); 2450 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH; 2451 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val); 2452 } 2453 2454 hsotg->core_params->dma_enable = val; 2455 } 2456 2457 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) 2458 { 2459 int valid = 1; 2460 2461 if (val > 0 && (hsotg->core_params->dma_enable <= 0 || 2462 !hsotg->hw_params.dma_desc_enable)) 2463 valid = 0; 2464 if (val < 0) 2465 valid = 0; 2466 2467 if (!valid) { 2468 if (val >= 0) 2469 dev_err(hsotg->dev, 2470 "%d invalid for dma_desc_enable parameter. 
Check HW configuration.\n", 2471 val); 2472 val = (hsotg->core_params->dma_enable > 0 && 2473 hsotg->hw_params.dma_desc_enable); 2474 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); 2475 } 2476 2477 hsotg->core_params->dma_desc_enable = val; 2478 } 2479 2480 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, 2481 int val) 2482 { 2483 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2484 if (val >= 0) { 2485 dev_err(hsotg->dev, 2486 "Wrong value for host_support_fs_low_power\n"); 2487 dev_err(hsotg->dev, 2488 "host_support_fs_low_power must be 0 or 1\n"); 2489 } 2490 val = 0; 2491 dev_dbg(hsotg->dev, 2492 "Setting host_support_fs_low_power to %d\n", val); 2493 } 2494 2495 hsotg->core_params->host_support_fs_ls_low_power = val; 2496 } 2497 2498 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val) 2499 { 2500 int valid = 1; 2501 2502 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) 2503 valid = 0; 2504 if (val < 0) 2505 valid = 0; 2506 2507 if (!valid) { 2508 if (val >= 0) 2509 dev_err(hsotg->dev, 2510 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n", 2511 val); 2512 val = hsotg->hw_params.enable_dynamic_fifo; 2513 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); 2514 } 2515 2516 hsotg->core_params->enable_dynamic_fifo = val; 2517 } 2518 2519 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2520 { 2521 int valid = 1; 2522 2523 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size) 2524 valid = 0; 2525 2526 if (!valid) { 2527 if (val >= 0) 2528 dev_err(hsotg->dev, 2529 "%d invalid for host_rx_fifo_size. Check HW configuration.\n", 2530 val); 2531 val = hsotg->hw_params.host_rx_fifo_size; 2532 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); 2533 } 2534 2535 hsotg->core_params->host_rx_fifo_size = val; 2536 } 2537 2538 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2539 { 2540 int valid = 1; 2541 2542 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) 2543 valid = 0; 2544 2545 if (!valid) { 2546 if (val >= 0) 2547 dev_err(hsotg->dev, 2548 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", 2549 val); 2550 val = hsotg->hw_params.host_nperio_tx_fifo_size; 2551 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", 2552 val); 2553 } 2554 2555 hsotg->core_params->host_nperio_tx_fifo_size = val; 2556 } 2557 2558 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2559 { 2560 int valid = 1; 2561 2562 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) 2563 valid = 0; 2564 2565 if (!valid) { 2566 if (val >= 0) 2567 dev_err(hsotg->dev, 2568 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n", 2569 val); 2570 val = hsotg->hw_params.host_perio_tx_fifo_size; 2571 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", 2572 val); 2573 } 2574 2575 hsotg->core_params->host_perio_tx_fifo_size = val; 2576 } 2577 2578 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) 2579 { 2580 int valid = 1; 2581 2582 if (val < 2047 || val > hsotg->hw_params.max_transfer_size) 2583 valid = 0; 2584 2585 if (!valid) { 2586 if (val >= 0) 2587 dev_err(hsotg->dev, 2588 "%d invalid for max_transfer_size. 
Check HW configuration.\n", 2589 val); 2590 val = hsotg->hw_params.max_transfer_size; 2591 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); 2592 } 2593 2594 hsotg->core_params->max_transfer_size = val; 2595 } 2596 2597 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) 2598 { 2599 int valid = 1; 2600 2601 if (val < 15 || val > hsotg->hw_params.max_packet_count) 2602 valid = 0; 2603 2604 if (!valid) { 2605 if (val >= 0) 2606 dev_err(hsotg->dev, 2607 "%d invalid for max_packet_count. Check HW configuration.\n", 2608 val); 2609 val = hsotg->hw_params.max_packet_count; 2610 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); 2611 } 2612 2613 hsotg->core_params->max_packet_count = val; 2614 } 2615 2616 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) 2617 { 2618 int valid = 1; 2619 2620 if (val < 1 || val > hsotg->hw_params.host_channels) 2621 valid = 0; 2622 2623 if (!valid) { 2624 if (val >= 0) 2625 dev_err(hsotg->dev, 2626 "%d invalid for host_channels. Check HW configuration.\n", 2627 val); 2628 val = hsotg->hw_params.host_channels; 2629 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); 2630 } 2631 2632 hsotg->core_params->host_channels = val; 2633 } 2634 2635 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) 2636 { 2637 int valid = 0; 2638 u32 hs_phy_type, fs_phy_type; 2639 2640 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, 2641 DWC2_PHY_TYPE_PARAM_ULPI)) { 2642 if (val >= 0) { 2643 dev_err(hsotg->dev, "Wrong value for phy_type\n"); 2644 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); 2645 } 2646 2647 valid = 0; 2648 } 2649 2650 hs_phy_type = hsotg->hw_params.hs_phy_type; 2651 fs_phy_type = hsotg->hw_params.fs_phy_type; 2652 if (val == DWC2_PHY_TYPE_PARAM_UTMI && 2653 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 2654 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2655 valid = 1; 2656 else if (val == DWC2_PHY_TYPE_PARAM_ULPI && 2657 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || 2658 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2659 valid = 1; 2660 else if (val == DWC2_PHY_TYPE_PARAM_FS && 2661 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 2662 valid = 1; 2663 2664 if (!valid) { 2665 if (val >= 0) 2666 dev_err(hsotg->dev, 2667 "%d invalid for phy_type. Check HW configuration.\n", 2668 val); 2669 val = DWC2_PHY_TYPE_PARAM_FS; 2670 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { 2671 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 2672 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) 2673 val = DWC2_PHY_TYPE_PARAM_UTMI; 2674 else 2675 val = DWC2_PHY_TYPE_PARAM_ULPI; 2676 } 2677 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val); 2678 } 2679 2680 hsotg->core_params->phy_type = val; 2681 } 2682 2683 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg) 2684 { 2685 return hsotg->core_params->phy_type; 2686 } 2687 2688 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val) 2689 { 2690 int valid = 1; 2691 2692 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2693 if (val >= 0) { 2694 dev_err(hsotg->dev, "Wrong value for speed parameter\n"); 2695 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n"); 2696 } 2697 valid = 0; 2698 } 2699 2700 if (val == DWC2_SPEED_PARAM_HIGH && 2701 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 2702 valid = 0; 2703 2704 if (!valid) { 2705 if (val >= 0) 2706 dev_err(hsotg->dev, 2707 "%d invalid for speed parameter. Check HW configuration.\n", 2708 val); 2709 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ? 
2710 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; 2711 dev_dbg(hsotg->dev, "Setting speed to %d\n", val); 2712 } 2713 2714 hsotg->core_params->speed = val; 2715 } 2716 2717 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val) 2718 { 2719 int valid = 1; 2720 2721 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ, 2722 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) { 2723 if (val >= 0) { 2724 dev_err(hsotg->dev, 2725 "Wrong value for host_ls_low_power_phy_clk parameter\n"); 2726 dev_err(hsotg->dev, 2727 "host_ls_low_power_phy_clk must be 0 or 1\n"); 2728 } 2729 valid = 0; 2730 } 2731 2732 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && 2733 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 2734 valid = 0; 2735 2736 if (!valid) { 2737 if (val >= 0) 2738 dev_err(hsotg->dev, 2739 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", 2740 val); 2741 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS 2742 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 2743 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; 2744 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", 2745 val); 2746 } 2747 2748 hsotg->core_params->host_ls_low_power_phy_clk = val; 2749 } 2750 2751 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) 2752 { 2753 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2754 if (val >= 0) { 2755 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); 2756 dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n"); 2757 } 2758 val = 0; 2759 dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val); 2760 } 2761 2762 hsotg->core_params->phy_ulpi_ddr = val; 2763 } 2764 2765 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) 2766 { 2767 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2768 if (val >= 0) { 2769 dev_err(hsotg->dev, 2770 "Wrong value for phy_ulpi_ext_vbus\n"); 2771 dev_err(hsotg->dev, 2772 "phy_ulpi_ext_vbus must be 0 or 1\n"); 2773 } 2774 val = 0; 2775 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); 2776 } 2777 2778 hsotg->core_params->phy_ulpi_ext_vbus = val; 2779 } 2780 2781 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) 2782 { 2783 int valid = 0; 2784 2785 switch (hsotg->hw_params.utmi_phy_data_width) { 2786 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: 2787 valid = (val == 8); 2788 break; 2789 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: 2790 valid = (val == 16); 2791 break; 2792 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: 2793 valid = (val == 8 || val == 16); 2794 break; 2795 } 2796 2797 if (!valid) { 2798 if (val >= 0) { 2799 dev_err(hsotg->dev, 2800 "%d invalid for phy_utmi_width. Check HW configuration.\n", 2801 val); 2802 } 2803 val = (hsotg->hw_params.utmi_phy_data_width == 2804 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ?
8 : 16; 2805 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); 2806 } 2807 2808 hsotg->core_params->phy_utmi_width = val; 2809 } 2810 2811 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) 2812 { 2813 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2814 if (val >= 0) { 2815 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); 2816 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); 2817 } 2818 val = 0; 2819 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); 2820 } 2821 2822 hsotg->core_params->ulpi_fs_ls = val; 2823 } 2824 2825 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) 2826 { 2827 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2828 if (val >= 0) { 2829 dev_err(hsotg->dev, "Wrong value for ts_dline\n"); 2830 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); 2831 } 2832 val = 0; 2833 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); 2834 } 2835 2836 hsotg->core_params->ts_dline = val; 2837 } 2838 2839 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) 2840 { 2841 int valid = 1; 2842 2843 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2844 if (val >= 0) { 2845 dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); 2846 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); 2847 } 2848 2849 valid = 0; 2850 } 2851 2852 if (val == 1 && !(hsotg->hw_params.i2c_enable)) 2853 valid = 0; 2854 2855 if (!valid) { 2856 if (val >= 0) 2857 dev_err(hsotg->dev, 2858 "%d invalid for i2c_enable. Check HW configuration.\n", 2859 val); 2860 val = hsotg->hw_params.i2c_enable; 2861 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val); 2862 } 2863 2864 hsotg->core_params->i2c_enable = val; 2865 } 2866 2867 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val) 2868 { 2869 int valid = 1; 2870 2871 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2872 if (val >= 0) { 2873 dev_err(hsotg->dev, 2874 "Wrong value for en_multiple_tx_fifo,\n"); 2875 dev_err(hsotg->dev, 2876 "en_multiple_tx_fifo must be 0 or 1\n"); 2877 } 2878 valid = 0; 2879 } 2880 2881 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo) 2882 valid = 0; 2883 2884 if (!valid) { 2885 if (val >= 0) 2886 dev_err(hsotg->dev, 2887 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n", 2888 val); 2889 val = hsotg->hw_params.en_multiple_tx_fifo; 2890 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val); 2891 } 2892 2893 hsotg->core_params->en_multiple_tx_fifo = val; 2894 } 2895 2896 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val) 2897 { 2898 int valid = 1; 2899 2900 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2901 if (val >= 0) { 2902 dev_err(hsotg->dev, 2903 "'%d' invalid for parameter reload_ctl\n", val); 2904 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n"); 2905 } 2906 valid = 0; 2907 } 2908 2909 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a) 2910 valid = 0; 2911 2912 if (!valid) { 2913 if (val >= 0) 2914 dev_err(hsotg->dev, 2915 "%d invalid for parameter reload_ctl. 
Check HW configuration.\n", 2916 val); 2917 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a; 2918 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val); 2919 } 2920 2921 hsotg->core_params->reload_ctl = val; 2922 } 2923 2924 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val) 2925 { 2926 if (val != -1) 2927 hsotg->core_params->ahbcfg = val; 2928 else 2929 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 << 2930 GAHBCFG_HBSTLEN_SHIFT; 2931 } 2932 2933 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val) 2934 { 2935 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2936 if (val >= 0) { 2937 dev_err(hsotg->dev, 2938 "'%d' invalid for parameter otg_ver\n", val); 2939 dev_err(hsotg->dev, 2940 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); 2941 } 2942 val = 0; 2943 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val); 2944 } 2945 2946 hsotg->core_params->otg_ver = val; 2947 } 2948 2949 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val) 2950 { 2951 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2952 if (val >= 0) { 2953 dev_err(hsotg->dev, 2954 "'%d' invalid for parameter uframe_sched\n", 2955 val); 2956 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n"); 2957 } 2958 val = 1; 2959 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val); 2960 } 2961 2962 hsotg->core_params->uframe_sched = val; 2963 } 2964 2965 static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg, 2966 int val) 2967 { 2968 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2969 if (val >= 0) { 2970 dev_err(hsotg->dev, 2971 "'%d' invalid for parameter external_id_pin_ctl\n", 2972 val); 2973 dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n"); 2974 } 2975 val = 0; 2976 dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val); 2977 } 2978 2979 hsotg->core_params->external_id_pin_ctl = val; 2980 } 2981 2982 static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg, 2983 int val) 2984 { 2985 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2986 if (val >= 0) { 2987 dev_err(hsotg->dev, 2988 "'%d' invalid for parameter hibernation\n", 2989 val); 2990 dev_err(hsotg->dev, "hibernation must be 0 or 1\n"); 2991 } 2992 val = 0; 2993 dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val); 2994 } 2995 2996 hsotg->core_params->hibernation = val; 2997 } 2998 2999 /* 3000 * This function is called during module initialization to pass module parameters 3001 * for the DWC_otg core.
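 * Callers that want full autodetection can first initialize every field of the params struct to -1 (e.g. with dwc2_set_all_params() below); each dwc2_set_param_*() helper treats a negative value as "not specified" and quietly falls back to a default derived from the detected hardware parameters.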
3002 */ 3003 void dwc2_set_parameters(struct dwc2_hsotg *hsotg, 3004 const struct dwc2_core_params *params) 3005 { 3006 dev_dbg(hsotg->dev, "%s()\n", __func__); 3007 3008 dwc2_set_param_otg_cap(hsotg, params->otg_cap); 3009 dwc2_set_param_dma_enable(hsotg, params->dma_enable); 3010 dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); 3011 dwc2_set_param_host_support_fs_ls_low_power(hsotg, 3012 params->host_support_fs_ls_low_power); 3013 dwc2_set_param_enable_dynamic_fifo(hsotg, 3014 params->enable_dynamic_fifo); 3015 dwc2_set_param_host_rx_fifo_size(hsotg, 3016 params->host_rx_fifo_size); 3017 dwc2_set_param_host_nperio_tx_fifo_size(hsotg, 3018 params->host_nperio_tx_fifo_size); 3019 dwc2_set_param_host_perio_tx_fifo_size(hsotg, 3020 params->host_perio_tx_fifo_size); 3021 dwc2_set_param_max_transfer_size(hsotg, 3022 params->max_transfer_size); 3023 dwc2_set_param_max_packet_count(hsotg, 3024 params->max_packet_count); 3025 dwc2_set_param_host_channels(hsotg, params->host_channels); 3026 dwc2_set_param_phy_type(hsotg, params->phy_type); 3027 dwc2_set_param_speed(hsotg, params->speed); 3028 dwc2_set_param_host_ls_low_power_phy_clk(hsotg, 3029 params->host_ls_low_power_phy_clk); 3030 dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr); 3031 dwc2_set_param_phy_ulpi_ext_vbus(hsotg, 3032 params->phy_ulpi_ext_vbus); 3033 dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width); 3034 dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls); 3035 dwc2_set_param_ts_dline(hsotg, params->ts_dline); 3036 dwc2_set_param_i2c_enable(hsotg, params->i2c_enable); 3037 dwc2_set_param_en_multiple_tx_fifo(hsotg, 3038 params->en_multiple_tx_fifo); 3039 dwc2_set_param_reload_ctl(hsotg, params->reload_ctl); 3040 dwc2_set_param_ahbcfg(hsotg, params->ahbcfg); 3041 dwc2_set_param_otg_ver(hsotg, params->otg_ver); 3042 dwc2_set_param_uframe_sched(hsotg, params->uframe_sched); 3043 dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl); 3044 dwc2_set_param_hibernation(hsotg, params->hibernation); 3045 } 3046 3047 /** 3048 * dwc2_get_hwparams() - During device initialization, read various hardware 3049 * configuration registers and interpret the contents. 3050 */ 3051 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) 3052 { 3053 struct dwc2_hw_params *hw = &hsotg->hw_params; 3054 unsigned width; 3055 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; 3056 u32 hptxfsiz, grxfsiz, gnptxfsiz; 3057 u32 gusbcfg; 3058 3059 /* 3060 * Attempt to ensure this device is really a DWC_otg Controller. 3061 * Read and verify the GSNPSID register contents. The value should be 3062 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3", 3063 * as in "OTG version 2.xx" or "OTG version 3.xx".
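 * For example, a GSNPSID value of 0x4f54310a would pass this check and be reported below as core release 3.10a.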
3064 */ 3065 hw->snpsid = readl(hsotg->regs + GSNPSID); 3066 if ((hw->snpsid & 0xfffff000) != 0x4f542000 && 3067 (hw->snpsid & 0xfffff000) != 0x4f543000) { 3068 dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n", 3069 hw->snpsid); 3070 return -ENODEV; 3071 } 3072 3073 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n", 3074 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, 3075 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); 3076 3077 hwcfg1 = readl(hsotg->regs + GHWCFG1); 3078 hwcfg2 = readl(hsotg->regs + GHWCFG2); 3079 hwcfg3 = readl(hsotg->regs + GHWCFG3); 3080 hwcfg4 = readl(hsotg->regs + GHWCFG4); 3081 grxfsiz = readl(hsotg->regs + GRXFSIZ); 3082 3083 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1); 3084 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2); 3085 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3); 3086 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); 3087 dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); 3088 3089 /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */ 3090 gusbcfg = readl(hsotg->regs + GUSBCFG); 3091 gusbcfg |= GUSBCFG_FORCEHOSTMODE; 3092 writel(gusbcfg, hsotg->regs + GUSBCFG); 3093 usleep_range(100000, 150000); 3094 3095 gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ); 3096 hptxfsiz = readl(hsotg->regs + HPTXFSIZ); 3097 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); 3098 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); 3099 gusbcfg = readl(hsotg->regs + GUSBCFG); 3100 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 3101 writel(gusbcfg, hsotg->regs + GUSBCFG); 3102 usleep_range(100000, 150000); 3103 3104 /* hwcfg2 */ 3105 hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >> 3106 GHWCFG2_OP_MODE_SHIFT; 3107 hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >> 3108 GHWCFG2_ARCHITECTURE_SHIFT; 3109 hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO); 3110 hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >> 3111 GHWCFG2_NUM_HOST_CHAN_SHIFT); 3112 hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >> 3113 GHWCFG2_HS_PHY_TYPE_SHIFT; 3114 hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> 3115 GHWCFG2_FS_PHY_TYPE_SHIFT; 3116 hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >> 3117 GHWCFG2_NUM_DEV_EP_SHIFT; 3118 hw->nperio_tx_q_depth = 3119 (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >> 3120 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1; 3121 hw->host_perio_tx_q_depth = 3122 (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >> 3123 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1; 3124 hw->dev_token_q_depth = 3125 (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >> 3126 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT; 3127 3128 /* hwcfg3 */ 3129 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> 3130 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; 3131 hw->max_transfer_size = (1 << (width + 11)) - 1; 3132 /* 3133 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates 3134 * coherent buffers with this size, and if it's too large we can 3135 * exhaust the coherent DMA pool. 
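 * For example, a width field of 8 encodes a 19-bit transfer size counter, so the raw limit would be (1 << 19) - 1 = 524287 bytes before being clipped to 65535.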
3136 */ 3137 if (hw->max_transfer_size > 65535) 3138 hw->max_transfer_size = 65535; 3139 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> 3140 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; 3141 hw->max_packet_count = (1 << (width + 4)) - 1; 3142 hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C); 3143 hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >> 3144 GHWCFG3_DFIFO_DEPTH_SHIFT; 3145 3146 /* hwcfg4 */ 3147 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); 3148 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> 3149 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; 3150 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); 3151 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); 3152 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> 3153 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT; 3154 3155 /* fifo sizes */ 3156 hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> 3157 GRXFSIZ_DEPTH_SHIFT; 3158 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> 3159 FIFOSIZE_DEPTH_SHIFT; 3160 hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> 3161 FIFOSIZE_DEPTH_SHIFT; 3162 3163 dev_dbg(hsotg->dev, "Detected values from hardware:\n"); 3164 dev_dbg(hsotg->dev, " op_mode=%d\n", 3165 hw->op_mode); 3166 dev_dbg(hsotg->dev, " arch=%d\n", 3167 hw->arch); 3168 dev_dbg(hsotg->dev, " dma_desc_enable=%d\n", 3169 hw->dma_desc_enable); 3170 dev_dbg(hsotg->dev, " power_optimized=%d\n", 3171 hw->power_optimized); 3172 dev_dbg(hsotg->dev, " i2c_enable=%d\n", 3173 hw->i2c_enable); 3174 dev_dbg(hsotg->dev, " hs_phy_type=%d\n", 3175 hw->hs_phy_type); 3176 dev_dbg(hsotg->dev, " fs_phy_type=%d\n", 3177 hw->fs_phy_type); 3178 dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n", 3179 hw->utmi_phy_data_width); 3180 dev_dbg(hsotg->dev, " num_dev_ep=%d\n", 3181 hw->num_dev_ep); 3182 dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n", 3183 hw->num_dev_perio_in_ep); 3184 dev_dbg(hsotg->dev, " host_channels=%d\n", 3185 hw->host_channels); 3186 dev_dbg(hsotg->dev, " max_transfer_size=%d\n", 3187 hw->max_transfer_size); 3188 dev_dbg(hsotg->dev, " max_packet_count=%d\n", 3189 hw->max_packet_count); 3190 dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n", 3191 hw->nperio_tx_q_depth); 3192 dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n", 3193 hw->host_perio_tx_q_depth); 3194 dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n", 3195 hw->dev_token_q_depth); 3196 dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n", 3197 hw->enable_dynamic_fifo); 3198 dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n", 3199 hw->en_multiple_tx_fifo); 3200 dev_dbg(hsotg->dev, " total_fifo_size=%d\n", 3201 hw->total_fifo_size); 3202 dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n", 3203 hw->host_rx_fifo_size); 3204 dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n", 3205 hw->host_nperio_tx_fifo_size); 3206 dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n", 3207 hw->host_perio_tx_fifo_size); 3208 dev_dbg(hsotg->dev, "\n"); 3209 3210 return 0; 3211 } 3212 3213 /* 3214 * Sets all parameters to the given value. 3215 * 3216 * Assumes that the dwc2_core_params struct contains only integers. 3217 */ 3218 void dwc2_set_all_params(struct dwc2_core_params *params, int value) 3219 { 3220 int *p = (int *)params; 3221 size_t size = sizeof(*params) / sizeof(*p); 3222 int i; 3223 3224 for (i = 0; i < size; i++) 3225 p[i] = value; 3226 } 3227 3228 3229 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg) 3230 { 3231 return hsotg->core_params->otg_ver == 1 ?
0x0200 : 0x0103; 3232 } 3233 3234 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg) 3235 { 3236 if (readl(hsotg->regs + GSNPSID) == 0xffffffff) 3237 return false; 3238 else 3239 return true; 3240 } 3241 3242 /** 3243 * dwc2_enable_global_interrupts() - Enables the controller's Global 3244 * Interrupt in the AHB Config register 3245 * 3246 * @hsotg: Programming view of DWC_otg controller 3247 */ 3248 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg) 3249 { 3250 u32 ahbcfg = readl(hsotg->regs + GAHBCFG); 3251 3252 ahbcfg |= GAHBCFG_GLBL_INTR_EN; 3253 writel(ahbcfg, hsotg->regs + GAHBCFG); 3254 } 3255 3256 /** 3257 * dwc2_disable_global_interrupts() - Disables the controller's Global 3258 * Interrupt in the AHB Config register 3259 * 3260 * @hsotg: Programming view of DWC_otg controller 3261 */ 3262 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg) 3263 { 3264 u32 ahbcfg = readl(hsotg->regs + GAHBCFG); 3265 3266 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN; 3267 writel(ahbcfg, hsotg->regs + GAHBCFG); 3268 } 3269 3270 MODULE_DESCRIPTION("DESIGNWARE HS OTG Core"); 3271 MODULE_AUTHOR("Synopsys, Inc."); 3272 MODULE_LICENSE("Dual BSD/GPL"); 3273