/*
 * core.c - DesignWare HS OTG Controller common routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

/**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
 * used in both device and host modes
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	/* Clear any pending OTG Interrupts */
	writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	writel(0xffffffff, hsotg->regs + GINTSTS);

	/* Enable the interrupts in the GINTMSK */
	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;

	if (hsotg->core_params->dma_enable <= 0)
		intmsk |= GINTSTS_RXFLVL;

	intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
		  GINTSTS_SESSREQINT;

	writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * Initializes the FSLSPClkSel field of the HCFG register depending on the
 * PHY type
 */
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, val;

	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	     hsotg->core_params->ulpi_fs_ls > 0) ||
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* Full speed PHY */
		val = HCFG_FSLSPCLKSEL_48_MHZ;
	} else {
		/* High speed PHY running at full speed or high speed */
		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
	}

	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
	hcfg = readl(hsotg->regs + HCFG);
	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
	writel(hcfg, hsotg->regs + HCFG);
}

/*
 * Performs a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 */
static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Wait for AHB master IDLE state */
	do {
		usleep_range(20000, 40000);
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (!(greset & GRSTCTL_AHBIDLE));

	/* Core Soft Reset */
	count = 0;
	greset |= GRSTCTL_CSFTRST;
	writel(greset, hsotg->regs + GRSTCTL);
	do {
		usleep_range(20000, 40000);
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (greset & GRSTCTL_CSFTRST);

	/*
	 * NOTE: This long sleep is _very_ important, otherwise the core will
	 * not stay in host mode after a connector ID change!
	 */
	usleep_range(150000, 200000);

	return 0;
}
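
/*
 * Note on the timeouts above (illustrative arithmetic only): each polling
 * loop sleeps 20-40 ms per iteration and gives up after 50 iterations, so
 * a wedged core is reported with -EBUSY after roughly 1-2 seconds.  The
 * final usleep_range() then adds another 150-200 ms before the reset is
 * considered complete.
 */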

static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_PHYSEL;
		writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Reset after a PHY select */
		retval = dwc2_core_reset(hsotg);
		if (retval) {
			dev_err(hsotg->dev, "%s() Reset failed, aborting",
				__func__);
			return retval;
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Program GI2CCTL.I2CEn */
		i2cctl = readl(hsotg->regs + GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		writel(i2cctl, hsotg->regs + GI2CCTL);
		i2cctl |= GI2CCTL_I2CEN;
		writel(i2cctl, hsotg->regs + GI2CCTL);
	}

	return retval;
}

static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (!select_phy)
		return 0;

	usbcfg = readl(hsotg->regs + GUSBCFG);

	/*
	 * HS PHY parameters. These parameters are preserved during soft reset
	 * so only program the first time. Do a soft reset immediately after
	 * setting phyif.
	 */
	switch (hsotg->core_params->phy_type) {
	case DWC2_PHY_TYPE_PARAM_ULPI:
		/* ULPI interface */
		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
		if (hsotg->core_params->phy_ulpi_ddr > 0)
			usbcfg |= GUSBCFG_DDRSEL;
		break;
	case DWC2_PHY_TYPE_PARAM_UTMI:
		/* UTMI+ interface */
		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
		if (hsotg->core_params->phy_utmi_width == 16)
			usbcfg |= GUSBCFG_PHYIF16;
		break;
	default:
		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
		break;
	}

	writel(usbcfg, hsotg->regs + GUSBCFG);

	/* Reset after setting the PHY parameters */
	retval = dwc2_core_reset(hsotg);
	if (retval) {
		dev_err(hsotg->dev, "%s() Reset failed, aborting",
			__func__);
		return retval;
	}

	return retval;
}

static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* If FS mode with FS PHY */
		retval = dwc2_fs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	} else {
		/* High speed PHY */
		retval = dwc2_hs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	}

	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	    hsotg->core_params->ulpi_fs_ls > 0) {
		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_ULPI_FS_LS;
		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
		writel(usbcfg, hsotg->regs + GUSBCFG);
	} else {
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
		writel(usbcfg, hsotg->regs + GUSBCFG);
	}

	return retval;
}

static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);

	switch (hsotg->hw_params.arch) {
	case GHWCFG2_EXT_DMA_ARCH:
		dev_err(hsotg->dev, "External DMA Mode not supported\n");
		return -EINVAL;

	case GHWCFG2_INT_DMA_ARCH:
		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
		if (hsotg->core_params->ahbcfg != -1) {
			ahbcfg &= GAHBCFG_CTRL_MASK;
			ahbcfg |= hsotg->core_params->ahbcfg &
				  ~GAHBCFG_CTRL_MASK;
		}
		break;

	case GHWCFG2_SLAVE_ONLY_ARCH:
	default:
		dev_dbg(hsotg->dev, "Slave Only Mode\n");
		break;
	}

	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
		hsotg->core_params->dma_enable,
		hsotg->core_params->dma_desc_enable);

	if (hsotg->core_params->dma_enable > 0) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
		else
			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
	} else {
		dev_dbg(hsotg->dev, "Using Slave mode\n");
		hsotg->core_params->dma_desc_enable = 0;
	}

	if (hsotg->core_params->dma_enable > 0)
		ahbcfg |= GAHBCFG_DMA_EN;

	writel(ahbcfg, hsotg->regs + GAHBCFG);

	return 0;
}

static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;

	usbcfg = readl(hsotg->regs + GUSBCFG);
	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);

	switch (hsotg->hw_params.op_mode) {
	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		if (hsotg->core_params->otg_cap ==
				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_HNPCAP;
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
	default:
		break;
	}

	writel(usbcfg, hsotg->regs + GUSBCFG);
}

/**
 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 * prepares the core for device mode or host mode operation
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @select_phy: If true then also set the Phy type
 * @irq: If >= 0, the irq to register
 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = readl(hsotg->regs + GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->core_params->phy_ulpi_ext_vbus ==
				DWC2_PHY_ULPI_EXTERNAL_VBUS)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->core_params->ts_dline > 0)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	writel(usbcfg, hsotg->regs + GUSBCFG);

	/* Reset the Controller */
	retval = dwc2_core_reset(hsotg);
	if (retval) {
		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
			__func__);
		return retval;
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, select_phy);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register */
	otgctl = readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	if (hsotg->core_params->otg_ver > 0)
		otgctl |= GOTGCTL_OTGVER;
	writel(otgctl, hsotg->regs + GOTGCTL);
	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	if (irq >= 0) {
		dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
			irq);
		retval = devm_request_irq(hsotg->dev, irq,
					  dwc2_handle_common_intr, IRQF_SHARED,
					  dev_name(hsotg->dev), hsotg);
		if (retval)
			return retval;
	}

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}

/**
 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/* Disable all interrupts */
	writel(0, hsotg->regs + GINTMSK);
	writel(0, hsotg->regs + HAINTMSK);

	/* Enable the common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/* Enable host mode interrupts without disturbing common interrupts */
	intmsk = readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
	writel(intmsk, hsotg->regs + GINTMSK);
}

/**
 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk = readl(hsotg->regs + GINTMSK);

	/* Disable host mode interrupts without disturbing common interrupts */
	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
	writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo sizes for
 * systems whose total fifo depth is smaller than the default RX + TX fifo
 * size.
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	total_fifo_size = hw->total_fifo_size;
	rxfsiz = params->host_rx_fifo_size;
	nptxfsiz = params->host_nperio_tx_fifo_size;
	ptxfsiz = params->host_perio_tx_fifo_size;

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS (Max Packet size) for a periodic EP as 1024, and for
	 * a non-periodic EP as 512.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channels.
		 * 2 * ((1024 / 4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512 / 4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * min periodic tx fifo depth
		 * (largest packet size * MC) / 4
		 * (1024 * 3) / 4 = 768
		 */
		ptxfsiz = 768;

		params->host_rx_fifo_size = rxfsiz;
		params->host_nperio_tx_fifo_size = nptxfsiz;
		params->host_perio_tx_fifo_size = ptxfsiz;
	}

	/*
	 * If the summation of RX, NPTX and PTX fifo sizes is still
	 * bigger than the total_fifo_size, then we have a problem.
	 *
	 * We won't be able to allocate as many endpoints. Right now,
	 * we're just printing an error message, but ideally this FIFO
	 * allocation algorithm would be improved in the future.
	 *
	 * FIXME improve this FIFO allocation algorithm.
	 */
	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
		dev_err(hsotg->dev, "invalid fifo sizes\n");
}
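
/*
 * Illustrative example of the Method 2 minimums above (not used by the
 * driver): on a core with 16 host channels the fallback sizes would be
 * rxfsiz = 516 + 16 = 532, nptxfsiz = 256 and ptxfsiz = 768, i.e. a total
 * of 1556 32-bit FIFO words, which must still fit in hw->total_fifo_size.
 */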

static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;

	if (!params->enable_dynamic_fifo)
		return;

	dwc2_calculate_dynamic_fifo(hsotg);

	/* Rx FIFO */
	grxfsiz = readl(hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
	grxfsiz |= params->host_rx_fifo_size <<
		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
	writel(grxfsiz, hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));

	/* Non-periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		readl(hsotg->regs + GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		readl(hsotg->regs + GNPTXFSIZ));

	/* Periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		readl(hsotg->regs + HPTXFSIZ));
	hptxfsiz = params->host_perio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	hptxfsiz |= (params->host_rx_fifo_size +
		     params->host_nperio_tx_fifo_size) <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		readl(hsotg->regs + HPTXFSIZ));

	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
		/*
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO
		 */
		dfifocfg = readl(hsotg->regs + GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		writel(dfifocfg, hsotg->regs + GDFIFOCFG);
	}
}
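
/*
 * Resulting FIFO layout (illustrative only, using the example sizes above):
 * the Rx FIFO occupies words [0, 532), the non-periodic Tx FIFO starts at
 * word 532 (programmed via the GNPTXFSIZ start address field), the periodic
 * Tx FIFO starts at word 532 + 256 = 788, and EPInfoBase in GDFIFOCFG is
 * set to 532 + 256 + 768 = 1556.
 */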

/**
 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
 * Host mode
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
 * request queues. Host channels are reset to ensure that they are ready for
 * performing transfers.
 */
void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, hfir, otgctl;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Restart the Phy Clock */
	writel(0, hsotg->regs + PCGCTL);

	/* Initialize Host Configuration Register */
	dwc2_init_fs_ls_pclk_sel(hsotg);
	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
		hcfg = readl(hsotg->regs + HCFG);
		hcfg |= HCFG_FSLSSUPP;
		writel(hcfg, hsotg->regs + HCFG);
	}

	/*
	 * This bit allows dynamic reloading of the HFIR register during
	 * runtime. This bit needs to be programmed during initial configuration
	 * and its value must not be changed during runtime.
	 */
	if (hsotg->core_params->reload_ctl > 0) {
		hfir = readl(hsotg->regs + HFIR);
		hfir |= HFIR_RLDCTRL;
		writel(hfir, hsotg->regs + HFIR);
	}

	if (hsotg->core_params->dma_desc_enable > 0) {
		u32 op_mode = hsotg->hw_params.op_mode;

		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->core_params->dma_desc_enable = 0;
		} else {
			hcfg = readl(hsotg->regs + HCFG);
			hcfg |= HCFG_DESCDMA;
			writel(hcfg, hsotg->regs + HCFG);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	writel(otgctl, hsotg->regs + GOTGCTL);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	writel(otgctl, hsotg->regs + GOTGCTL);

	if (hsotg->core_params->dma_desc_enable <= 0) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->core_params->host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = readl(hsotg->regs + HCCHAR(i));
			hcchar &= ~HCCHAR_CHENA;
			hcchar |= HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			writel(hcchar, hsotg->regs + HCCHAR(i));
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			int count = 0;

			hcchar = readl(hsotg->regs + HCCHAR(i));
			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			writel(hcchar, hsotg->regs + HCCHAR(i));
			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
				__func__, i);
			do {
				hcchar = readl(hsotg->regs + HCCHAR(i));
				if (++count > 1000) {
					dev_err(hsotg->dev,
						"Unable to clear enable on channel %d\n",
						i);
					break;
				}
				udelay(1);
			} while (hcchar & HCCHAR_CHENA);
		}
	}

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			writel(hprt0, hsotg->regs + HPRT0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}

static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "control/bulk\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_BBLERR;
		} else {
			hcintmsk |= HCINTMSK_NAK;
			hcintmsk |= HCINTMSK_NYET;
			if (chan->do_ping)
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->do_split) {
			hcintmsk |= HCINTMSK_NAK;
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "intr\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_NAK;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		hcintmsk |= HCINTMSK_FRMOVRUN;

		if (chan->ep_is_in)
			hcintmsk |= HCINTMSK_BBLERR;
		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		if (chan->do_split) {
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "isoc\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_FRMOVRUN;
		hcintmsk |= HCINTMSK_ACK;

		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_XACTERR;
			hcintmsk |= HCINTMSK_BBLERR;
		}
		break;
	default:
		dev_err(hsotg->dev, "## Unknown EP type ##\n");
		break;
	}

	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}
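
/*
 * Example of the resulting mask (derived from the cases above, for
 * illustration only): a bulk IN channel with no split pending and no error
 * recovery in progress gets HCINTMSK = CHHLTD | XFERCOMPL | STALL |
 * XACTERR | DATATGLERR | BBLERR, while NAK, NYET and ACK stay masked until
 * a split, ping or error-recovery state actually requires them.
 */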

static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	/*
	 * For Descriptor DMA mode core halts the channel on AHB error.
	 * Interrupt is not required.
	 */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcintmsk |= HCINTMSK_AHBERR;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			hcintmsk |= HCINTMSK_XFERCOMPL;
	}

	if (chan->error_state && !chan->do_split &&
	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "setting ACK\n");
		hcintmsk |= HCINTMSK_ACK;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_DATATGLERR;
			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
				hcintmsk |= HCINTMSK_NAK;
		}
	}

	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan)
{
	u32 intmsk;

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_hc_enable_dma_ints(hsotg, chan);
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA disabled\n");
		dwc2_hc_enable_slave_ints(hsotg, chan);
	}

	/* Enable the top level host channel interrupt */
	intmsk = readl(hsotg->regs + HAINTMSK);
	intmsk |= 1 << chan->hc_num;
	writel(intmsk, hsotg->regs + HAINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);

	/* Make sure host channel interrupts are enabled */
	intmsk = readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_HCHINT;
	writel(intmsk, hsotg->regs + GINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}

/**
 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
 * a specific endpoint
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The HCCHARn register is set up with the characteristics specified in chan.
 * Host channel interrupts that may need to be serviced while this transfer is
 * in progress are enabled.
 */
void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Clear old interrupt conditions for this host channel */
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	writel(hcintmsk, hsotg->regs + HCINT(hc_num));

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	writel(hcchar, hsotg->regs + HCCHAR(hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, " Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, " Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, " Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev,
				 "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
		if (dbg_hc(chan)) {
			dev_vdbg(hsotg->dev, " comp split %d\n",
				 chan->complete_split);
			dev_vdbg(hsotg->dev, " xact pos %d\n",
				 chan->xact_pos);
			dev_vdbg(hsotg->dev, " hub addr %d\n",
				 chan->hub_addr);
			dev_vdbg(hsotg->dev, " hub port %d\n",
				 chan->hub_port);
			dev_vdbg(hsotg->dev, " is_in %d\n",
				 chan->ep_is_in);
			dev_vdbg(hsotg->dev, " Max Pkt %d\n",
				 chan->max_packet);
			dev_vdbg(hsotg->dev, " xferlen %d\n",
				 chan->xfer_len);
		}
	}

	writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
}
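
/*
 * Typical host channel lifecycle as driven by the HCD code (simplified
 * sketch, not a verbatim call sequence from hcd.c):
 *
 *	dwc2_hc_init(hsotg, chan);		- program HCCHAR/HCSPLT, unmask ints
 *	dwc2_hc_start_transfer(hsotg, chan);	- program HCTSIZ/HCDMA, enable channel
 *	...					- per-channel interrupts serviced
 *	dwc2_hc_halt(hsotg, chan, status);	- abort, or normal halt in Slave mode
 *	dwc2_hc_cleanup(hsotg, chan);		- once the channel is released
 */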

/**
 * dwc2_hc_halt() - Attempts to halt a host channel
 *
 * @hsotg: Controller register interface
 * @chan: Host channel to halt
 * @halt_status: Reason for halting the channel
 *
 * This function should only be called in Slave mode or to abort a transfer in
 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
 * controller halts the channel when the transfer is complete or a condition
 * occurs that requires application intervention.
 *
 * In slave mode, checks for a free request queue entry, then sets the Channel
 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt. If there is no free
 * request queue entry, sets only the Channel Disable bit of the HCCHARn
 * register to flush requests for this channel. In the latter case, sets a
 * flag to indicate that the host channel needs to be halted when a request
 * queue slot is open.
 *
 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
 * HCCHARn register. The controller ensures there is space in the request
 * queue before submitting the halt request.
 *
 * Some time may elapse before the core flushes any posted requests for this
 * host channel and halts. The Channel Halted interrupt handler completes the
 * deactivation of the host channel.
 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);
	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	hcchar |= HCCHAR_CHDIS;

	if (hsotg->core_params->dma_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/* Check for space in the request queue to issue the halt */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = readl(hsotg->regs + GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = readl(hsotg->regs + HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, " halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, " halt_status: %d\n",
			 chan->halt_status);
	}
}

/**
 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to clean up
 *
 * This function is normally called after a transfer is done and the host
 * channel is being released
 */
void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u32 hcintmsk;

	chan->xfer_started = 0;

	/*
	 * Clear channel interrupt enables and any unhandled channel interrupt
	 * conditions
	 */
	writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
}

/**
 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
 * which frame a periodic transfer should occur
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to set up and its properties
 * @hcchar: Current value of the HCCHAR register for the specified host channel
 *
 * This function has no effect on non-periodic transfers
 */
static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, u32 *hcchar)
{
	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* 1 if _next_ frame is odd, 0 if it's even */
		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
			*hcchar |= HCCHAR_ODDFRM;
	}
}

static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
{
	/* Set up the initial PID for the transfer */
	if (chan->speed == USB_SPEED_HIGH) {
		if (chan->ep_is_in) {
			if (chan->multi_count == 1)
				chan->data_pid_start = DWC2_HC_PID_DATA0;
			else if (chan->multi_count == 2)
				chan->data_pid_start = DWC2_HC_PID_DATA1;
			else
				chan->data_pid_start = DWC2_HC_PID_DATA2;
		} else {
			if (chan->multi_count == 1)
				chan->data_pid_start = DWC2_HC_PID_DATA0;
			else
				chan->data_pid_start = DWC2_HC_PID_MDATA;
		}
	} else {
		chan->data_pid_start = DWC2_HC_PID_DATA0;
	}
}

/**
 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
 * the Host Channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * This function should only be called in Slave mode. For a channel associated
 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
 * associated with a periodic EP, the periodic Tx FIFO is written.
 *
 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
 * the number of bytes written to the Tx FIFO.
 */
static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 i;
	u32 remaining_count;
	u32 byte_count;
	u32 dword_count;
	u32 __iomem *data_fifo;
	u32 *data_buf = (u32 *)chan->xfer_buf;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));

	remaining_count = chan->xfer_len - chan->xfer_count;
	if (remaining_count > chan->max_packet)
		byte_count = chan->max_packet;
	else
		byte_count = remaining_count;

	dword_count = (byte_count + 3) / 4;

	if (((unsigned long)data_buf & 0x3) == 0) {
		/* xfer_buf is DWORD aligned */
		for (i = 0; i < dword_count; i++, data_buf++)
			writel(*data_buf, data_fifo);
	} else {
		/* xfer_buf is not DWORD aligned */
		for (i = 0; i < dword_count; i++, data_buf++) {
			u32 data = data_buf[0] | data_buf[1] << 8 |
				   data_buf[2] << 16 | data_buf[3] << 24;
			writel(data, data_fifo);
		}
	}

	chan->xfer_count += byte_count;
	chan->xfer_buf += byte_count;
}
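
/*
 * Illustrative arithmetic for the FIFO write above (not driver code): with
 * 61 bytes remaining and a 64-byte max packet, byte_count is 61 and
 * dword_count is (61 + 3) / 4 = 16, so 16 32-bit words are pushed into the
 * channel's Tx FIFO and xfer_count/xfer_buf advance by 61 bytes.
 */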

/**
 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
 * channel and starts the transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel. The xfer_len value
 *        may be reduced to accommodate the max widths of the XferSize and
 *        PktCnt fields in the HCTSIZn register. The multi_count value may be
 *        changed to reflect the final xfer_len value.
 *
 * This function may be called in either Slave mode or DMA mode. In Slave mode,
 * the caller must ensure that there is sufficient space in the request queue
 * and Tx Data FIFO.
 *
 * For an OUT transfer in Slave mode, it loads a data packet into the
 * appropriate FIFO. If necessary, additional data packets are loaded in the
 * Host ISR.
 *
 * For an IN transfer in Slave mode, a data packet is requested. The data
 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
 * additional data packets are requested in the Host ISR.
 *
 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
 * register along with a packet count of 1 and the channel is enabled. This
 * causes a single PING transaction to occur. Other fields in HCTSIZ are
 * simply set to 0 since no data transfer occurs in this case.
 *
 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
 * all the information required to perform the subsequent data transfer. In
 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
 * controller performs the entire PING protocol, then starts the data
 * transfer.
 */
void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
			    struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		if (hsotg->core_params->dma_enable <= 0) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		} else {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, DMA\n");
			hctsiz |= TSIZ_DOPNG;
		}
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_is_in)
			/*
			 * Always program an integral # of max packets for IN
			 * transfers
			 */
			chan->xfer_len = num_packets * chan->max_packet;

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	}

	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
			 hctsiz, chan->hc_num);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
			 TSIZ_XFERSIZE_SHIFT);
		dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
			 (hctsiz & TSIZ_PKTCNT_MASK) >>
			 TSIZ_PKTCNT_SHIFT);
		dev_vdbg(hsotg->dev, " Start PID: %d\n",
			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
			 TSIZ_SC_MC_PID_SHIFT);
	}

	if (hsotg->core_params->dma_enable > 0) {
		dma_addr_t dma_addr;

		if (chan->align_buf) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "align_buf\n");
			dma_addr = chan->align_buf;
		} else {
			dma_addr = chan->xfer_dma;
		}
		writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
				 (unsigned long)dma_addr, chan->hc_num);
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
	}

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (hsotg->core_params->dma_enable <= 0 &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}
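
/*
 * Worked example of the length/packet-count clamping above (illustration
 * only): a non-split bulk IN request of 3000 bytes with a 512-byte max
 * packet yields num_packets = (3000 + 511) / 512 = 6, and because IN
 * transfers are rounded up to an integral number of max packets, xfer_len
 * is reprogrammed to 6 * 512 = 3072 before HCTSIZ is written.
 */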

/**
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
 * host channel and starts the transfer in Descriptor DMA mode
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
 * with micro-frame bitmap.
 *
 * Initializes HCDMA register with descriptor list address and CTD value then
 * starts the transfer via enabling the channel.
 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hc_dma;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
	}

	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;

	/* Always start from first descriptor */
	hc_dma &= ~HCDMA_CTD_MASK;
	writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
			 hc_dma, chan->hc_num);

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}

/**
 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
 * a previous call to dwc2_hc_start_transfer()
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The caller must ensure there is sufficient space in the request queue and Tx
 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
 * the controller acts autonomously to complete transfers programmed to a host
 * channel.
 *
 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
 * if there is any data remaining to be queued. For an IN transfer, another
 * data packet is always requested. For the SETUP phase of a control transfer,
 * this function does nothing.
 *
 * Return: 1 if a new request is queued, 0 if no more requests are required
 * for this transfer
 */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));

		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
				 hcchar);
		writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
		chan->requests++;
		return 1;
	}

	/* OUT transfers */

	if (chan->xfer_count < chan->xfer_len) {
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			u32 hcchar = readl(hsotg->regs +
					   HCCHAR(chan->hc_num));

			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
		chan->requests++;
		return 1;
	}

	return 0;
}

/**
 * dwc2_hc_do_ping() - Starts a PING transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * This function should only be called in Slave mode. The Do Ping bit is set in
 * the HCTSIZ register, then the channel is enabled.
 */
void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	hctsiz = TSIZ_DOPNG;
	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;
	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
}

/**
 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
 * the HFIR register according to PHY type and speed
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: The caller can modify the value of the HFIR register only after the
 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
 * has been set
 */
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;
	u32 hprt0;
	int clock = 60;	/* default value */

	usbcfg = readl(hsotg->regs + GUSBCFG);
	hprt0 = readl(hsotg->regs + HPRT0);

	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
	    !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
		clock = 48;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 30;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		clock = 48;

	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
		/* High speed case */
		return 125 * clock;
	else
		/* FS/LS case */
		return 1000 * clock;
}
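
/*
 * Illustrative HFIR values from the calculation above (not driver code):
 * with a 60 MHz PHY clock a high-speed port gets 125 * 60 = 7500 clocks
 * per microframe, while a 48 MHz full/low-speed configuration gets
 * 1000 * 48 = 48000 clocks per 1 ms frame.
 */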

/**
 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
 * buffer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @dest:  Destination buffer for the packet
 * @bytes: Number of bytes to copy to the destination
 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
	u32 *data_buf = (u32 *)dest;
	int word_count = (bytes + 3) / 4;
	int i;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
	 */

	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);

	for (i = 0; i < word_count; i++, data_buf++)
		*data_buf = readl(fifo);
}

/**
 * dwc2_dump_host_registers() - Prints the host registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;
	int i;

	dev_dbg(hsotg->dev, "Host Global Registers\n");
	addr = hsotg->regs + HCFG;
	dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HFIR;
	dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HFNUM;
	dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HPTXSTS;
	dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HAINT;
	dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HAINTMSK;
	dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	if (hsotg->core_params->dma_desc_enable > 0) {
		addr = hsotg->regs + HFLBADDR;
		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
	}

	addr = hsotg->regs + HPRT0;
	dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));

	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
		addr = hsotg->regs + HCCHAR(i);
		dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCSPLT(i);
		dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCINT(i);
		dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCINTMSK(i);
		dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCTSIZ(i);
		dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCDMA(i);
		dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		if (hsotg->core_params->dma_desc_enable > 0) {
			addr = hsotg->regs + HCDMAB(i);
			dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
				(unsigned long)addr, readl(addr));
		}
	}
#endif
}

/**
 * dwc2_dump_global_registers() - Prints the core global registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;

	dev_dbg(hsotg->dev, "Core Global Registers\n");
	addr = hsotg->regs + GOTGCTL;
	dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GOTGINT;
	dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GAHBCFG;
	dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GUSBCFG;
	dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRSTCTL;
	dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GINTSTS;
	dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GINTMSK;
	dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRXSTSR;
	dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRXFSIZ;
	dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GNPTXFSIZ;
	dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GNPTXSTS;
	dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GI2CCTL;
	dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GPVNDCTL;
	dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GGPIO;
	dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GUID;
	dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GSNPSID;
	dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG1;
	dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG2;
	dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG3;
	dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG4;
	dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GLPMCFG;
	dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GPWRDN;
	dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GDFIFOCFG;
	dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HPTXFSIZ;
	dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));

	addr = hsotg->regs + PCGCTL;
	dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
#endif
}

/**
 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
 *
 * @hsotg: Programming view of DWC_otg controller
 * @num:   Tx FIFO to flush
 */
void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);

	greset = GRSTCTL_TXFFLSH;
	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
	writel(greset, hsotg->regs + GRSTCTL);
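
	/* Poll until the core clears TxFFlsh, giving up after about 10 ms */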
	do {
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 10000) {
			dev_warn(hsotg->dev,
				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
				 __func__, greset,
				 readl(hsotg->regs + GNPTXSTS));
			break;
		}
		udelay(1);
	} while (greset & GRSTCTL_TXFFLSH);

	/* Wait for at least 3 PHY Clocks */
	udelay(1);
}

/**
 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	greset = GRSTCTL_RXFFLSH;
	writel(greset, hsotg->regs + GRSTCTL);

	do {
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 10000) {
			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
				 __func__, greset);
			break;
		}
		udelay(1);
	} while (greset & GRSTCTL_RXFFLSH);

	/* Wait for at least 3 PHY Clocks */
	udelay(1);
}

#define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))

/* Parameter access functions */
void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	switch (val) {
	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
			valid = 0;
		break;
	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
		switch (hsotg->hw_params.op_mode) {
		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
			break;
		default:
			valid = 0;
			break;
		}
		break;
	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
		/* always valid */
		break;
	default:
		valid = 0;
		break;
	}

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for otg_cap parameter. Check HW configuration.\n",
				val);
		switch (hsotg->hw_params.op_mode) {
		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
			break;
		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
			break;
		default:
			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
			break;
		}
		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
	}

	hsotg->core_params->otg_cap = val;
}

void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
		valid = 0;
	if (val < 0)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for dma_enable parameter. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
	}

	hsotg->core_params->dma_enable = val;
}

void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
			!hsotg->hw_params.dma_desc_enable))
		valid = 0;
	if (val < 0)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
				val);
		val = (hsotg->core_params->dma_enable > 0 &&
			hsotg->hw_params.dma_desc_enable);
		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
	}

	hsotg->core_params->dma_desc_enable = val;
}

void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
						 int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for host_support_fs_ls_low_power\n");
			dev_err(hsotg->dev,
				"host_support_fs_ls_low_power must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev,
			"Setting host_support_fs_ls_low_power to %d\n", val);
	}

	hsotg->core_params->host_support_fs_ls_low_power = val;
}

void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
		valid = 0;
	if (val < 0)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.enable_dynamic_fifo;
		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
	}

	hsotg->core_params->enable_dynamic_fifo = val;
}

void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_rx_fifo_size;
		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
	}

	hsotg->core_params->host_rx_fifo_size = val;
}

void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_nperio_tx_fifo_size;
		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
			val);
	}

	hsotg->core_params->host_nperio_tx_fifo_size = val;
}

void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_perio_tx_fifo_size;
		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
			val);
	}

	hsotg->core_params->host_perio_tx_fifo_size = val;
}

void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for max_transfer_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.max_transfer_size;
		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
	}

	hsotg->core_params->max_transfer_size = val;
}

void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 15 || val > hsotg->hw_params.max_packet_count)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for max_packet_count. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.max_packet_count;
		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
	}

	hsotg->core_params->max_packet_count = val;
}

void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 1 || val > hsotg->hw_params.host_channels)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_channels. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_channels;
		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
	}

	hsotg->core_params->host_channels = val;
}

void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 0;
	u32 hs_phy_type, fs_phy_type;

	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
			       DWC2_PHY_TYPE_PARAM_ULPI)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for phy_type\n");
			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
		}

		valid = 0;
	}

	hs_phy_type = hsotg->hw_params.hs_phy_type;
	fs_phy_type = hsotg->hw_params.fs_phy_type;
	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		valid = 1;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for phy_type. Check HW configuration.\n",
				val);
		val = DWC2_PHY_TYPE_PARAM_FS;
		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
				val = DWC2_PHY_TYPE_PARAM_UTMI;
			else
				val = DWC2_PHY_TYPE_PARAM_ULPI;
		}
		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
	}

	hsotg->core_params->phy_type = val;
}

static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->phy_type;
}

void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
			dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == DWC2_SPEED_PARAM_HIGH &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for speed parameter. Check HW configuration.\n",
				val);
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
	}

	hsotg->core_params->speed = val;
}

void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for host_ls_low_power_phy_clk parameter\n");
			dev_err(hsotg->dev,
				"host_ls_low_power_phy_clk must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
				val);
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
			val);
	}

	hsotg->core_params->host_ls_low_power_phy_clk = val;
}

void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
	}

	hsotg->core_params->phy_ulpi_ddr = val;
}

void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for phy_ulpi_ext_vbus\n");
			dev_err(hsotg->dev,
				"phy_ulpi_ext_vbus must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
	}

	hsotg->core_params->phy_ulpi_ext_vbus = val;
}

void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 0;

	switch (hsotg->hw_params.utmi_phy_data_width) {
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
		valid = (val == 8);
		break;
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
		valid = (val == 16);
		break;
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
		valid = (val == 8 || val == 16);
		break;
	}

	if (!valid) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"%d invalid for phy_utmi_width. Check HW configuration.\n",
				val);
		}
		val = (hsotg->hw_params.utmi_phy_data_width ==
		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
	}

	hsotg->core_params->phy_utmi_width = val;
}

void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
	}

	hsotg->core_params->ulpi_fs_ls = val;
}

void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
	}

	hsotg->core_params->ts_dline = val;
}

void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
		}

		valid = 0;
	}

	if (val == 1 && !(hsotg->hw_params.i2c_enable))
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for i2c_enable. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.i2c_enable;
		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
	}

	hsotg->core_params->i2c_enable = val;
}

void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for en_multiple_tx_fifo\n");
			dev_err(hsotg->dev,
				"en_multiple_tx_fifo must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.en_multiple_tx_fifo;
		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
	}

	hsotg->core_params->en_multiple_tx_fifo = val;
}

void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter reload_ctl\n", val);
			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
	}

	hsotg->core_params->reload_ctl = val;
}

void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
{
	if (val != -1)
		hsotg->core_params->ahbcfg = val;
	else
		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
						GAHBCFG_HBSTLEN_SHIFT;
}

void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter otg_ver\n", val);
			dev_err(hsotg->dev,
				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
	}

	hsotg->core_params->otg_ver = val;
}

static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter uframe_sched\n",
				val);
			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
		}
		val = 1;
		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
	}

	hsotg->core_params->uframe_sched = val;
}

/*
 * This function is called during module initialization to pass module
 * parameters for the DWC_otg core.
 */
void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
			 const struct dwc2_core_params *params)
{
	dev_dbg(hsotg->dev, "%s()\n", __func__);

	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
			params->host_support_fs_ls_low_power);
	dwc2_set_param_enable_dynamic_fifo(hsotg,
			params->enable_dynamic_fifo);
	dwc2_set_param_host_rx_fifo_size(hsotg,
			params->host_rx_fifo_size);
	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
			params->host_nperio_tx_fifo_size);
	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
			params->host_perio_tx_fifo_size);
	dwc2_set_param_max_transfer_size(hsotg,
			params->max_transfer_size);
	dwc2_set_param_max_packet_count(hsotg,
			params->max_packet_count);
	dwc2_set_param_host_channels(hsotg, params->host_channels);
	dwc2_set_param_phy_type(hsotg, params->phy_type);
	dwc2_set_param_speed(hsotg, params->speed);
	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
			params->host_ls_low_power_phy_clk);
	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
			params->phy_ulpi_ext_vbus);
	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
	dwc2_set_param_en_multiple_tx_fifo(hsotg,
			params->en_multiple_tx_fifo);
	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
}
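
/*
 * Illustrative sketch only (the structure name below is made up; the field
 * names and constants are the ones handled by the setters above): platform
 * glue typically fills a struct dwc2_core_params, using -1 for any field
 * that should fall back to the default derived from the hardware
 * configuration read by dwc2_get_hwparams(), then calls
 * dwc2_set_parameters() once at probe time:
 *
 *	static const struct dwc2_core_params my_board_params = {
 *		.otg_cap		= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
 *		.dma_enable		= -1,
 *		.dma_desc_enable	= 0,
 *		.speed			= -1,
 *		.phy_type		= DWC2_PHY_TYPE_PARAM_UTMI,
 *		.phy_utmi_width		= -1,
 *		.otg_ver		= 0,
 *		.uframe_sched		= -1,
 *		(remaining fields set to -1 to request the hardware default)
 *	};
 *
 *	dwc2_set_parameters(hsotg, &my_board_params);
 */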

/**
 * dwc2_get_hwparams() - During device initialization, read various hardware
 * configuration registers and interpret the contents.
 */
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	unsigned width;
	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
	u32 hptxfsiz, grxfsiz, gnptxfsiz;
	u32 gusbcfg;

	/*
	 * Attempt to ensure this device is really a DWC_otg Controller.
	 * Read and verify the GSNPSID register contents. The value should be
	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or
	 * "OT3", as in "OTG version 2.xx" or "OTG version 3.xx".
	 */
	hw->snpsid = readl(hsotg->regs + GSNPSID);
	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
			hw->snpsid);
		return -ENODEV;
	}

	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);

	hwcfg1 = readl(hsotg->regs + GHWCFG1);
	hwcfg2 = readl(hsotg->regs + GHWCFG2);
	hwcfg3 = readl(hsotg->regs + GHWCFG3);
	hwcfg4 = readl(hsotg->regs + GHWCFG4);
	gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
	grxfsiz = readl(hsotg->regs + GRXFSIZ);

	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);

	/* Force host mode to get HPTXFSIZ exact power on value */
	gusbcfg = readl(hsotg->regs + GUSBCFG);
	gusbcfg |= GUSBCFG_FORCEHOSTMODE;
	writel(gusbcfg, hsotg->regs + GUSBCFG);
	usleep_range(100000, 150000);

	hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
	gusbcfg = readl(hsotg->regs + GUSBCFG);
	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
	writel(gusbcfg, hsotg->regs + GUSBCFG);
	usleep_range(100000, 150000);

	/* hwcfg2 */
	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
		      GHWCFG2_OP_MODE_SHIFT;
	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
		   GHWCFG2_ARCHITECTURE_SHIFT;
	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
				 GHWCFG2_NUM_HOST_CHAN_SHIFT);
	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
			  GHWCFG2_HS_PHY_TYPE_SHIFT;
	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
			  GHWCFG2_FS_PHY_TYPE_SHIFT;
	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
			 GHWCFG2_NUM_DEV_EP_SHIFT;
	hw->nperio_tx_q_depth =
		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->host_perio_tx_q_depth =
		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->dev_token_q_depth =
		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;

	/* hwcfg3 */
	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_transfer_size = (1 << (width + 11)) - 1;
	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_packet_count = (1 << (width + 4)) - 1;
	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
			      GHWCFG3_DFIFO_DEPTH_SHIFT;
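
	/*
	 * For example, a transfer size counter width field of 15 yields a
	 * max_transfer_size of (1 << 26) - 1 = 67108863 bytes, and a packet
	 * size counter width of 6 yields a max_packet_count of
	 * (1 << 10) - 1 = 1023 packets.
	 */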

	/* hwcfg4 */
	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;

	/* fifo sizes */
	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
				GRXFSIZ_DEPTH_SHIFT;
	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				       FIFOSIZE_DEPTH_SHIFT;
	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				      FIFOSIZE_DEPTH_SHIFT;

	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
	dev_dbg(hsotg->dev, "  op_mode=%d\n",
		hw->op_mode);
	dev_dbg(hsotg->dev, "  arch=%d\n",
		hw->arch);
	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
		hw->dma_desc_enable);
	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
		hw->power_optimized);
	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
		hw->i2c_enable);
	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
		hw->hs_phy_type);
	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
		hw->fs_phy_type);
	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
		hw->utmi_phy_data_width);
	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
		hw->num_dev_ep);
	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
		hw->num_dev_perio_in_ep);
	dev_dbg(hsotg->dev, "  host_channels=%d\n",
		hw->host_channels);
	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
		hw->max_transfer_size);
	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
		hw->max_packet_count);
	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
		hw->nperio_tx_q_depth);
	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
		hw->host_perio_tx_q_depth);
	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
		hw->dev_token_q_depth);
	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
		hw->enable_dynamic_fifo);
	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
		hw->en_multiple_tx_fifo);
	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
		hw->total_fifo_size);
	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
		hw->host_rx_fifo_size);
	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
		hw->host_nperio_tx_fifo_size);
	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
		hw->host_perio_tx_fifo_size);
	dev_dbg(hsotg->dev, "\n");

	return 0;
}

u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
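	/* OTG supplement revision in BCD: 0x0200 = OTG 2.0, 0x0103 = OTG 1.3 */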
	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
}

bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
{
	if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
		return false;
	else
		return true;
}

/**
 * dwc2_enable_global_interrupts() - Enables the controller's Global
 * Interrupt in the AHB Config register
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);

	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
	writel(ahbcfg, hsotg->regs + GAHBCFG);
}

/**
 * dwc2_disable_global_interrupts() - Disables the controller's Global
 * Interrupt in the AHB Config register
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);

	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
	writel(ahbcfg, hsotg->regs + GAHBCFG);
}

MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_LICENSE("Dual BSD/GPL");