1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * core.c - DesignWare USB3 DRD Controller Core file 4 * 5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com 6 * 7 * Authors: Felipe Balbi <balbi@ti.com>, 8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 9 */ 10 11 #include <linux/clk.h> 12 #include <linux/version.h> 13 #include <linux/module.h> 14 #include <linux/kernel.h> 15 #include <linux/slab.h> 16 #include <linux/spinlock.h> 17 #include <linux/platform_device.h> 18 #include <linux/pm_runtime.h> 19 #include <linux/interrupt.h> 20 #include <linux/ioport.h> 21 #include <linux/io.h> 22 #include <linux/list.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/of.h> 26 #include <linux/of_graph.h> 27 #include <linux/acpi.h> 28 #include <linux/pci.h> 29 #include <linux/pinctrl/consumer.h> 30 #include <linux/pinctrl/devinfo.h> 31 #include <linux/reset.h> 32 #include <linux/bitfield.h> 33 34 #include <linux/usb/ch9.h> 35 #include <linux/usb/gadget.h> 36 #include <linux/usb/of.h> 37 #include <linux/usb/otg.h> 38 39 #include "core.h" 40 #include "gadget.h" 41 #include "glue.h" 42 #include "io.h" 43 44 #include "debug.h" 45 #include "../host/xhci-ext-caps.h" 46 47 #define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */ 48 49 /** 50 * dwc3_get_dr_mode - Validates and sets dr_mode 51 * @dwc: pointer to our context structure 52 */ 53 static int dwc3_get_dr_mode(struct dwc3 *dwc) 54 { 55 enum usb_dr_mode mode; 56 struct device *dev = dwc->dev; 57 unsigned int hw_mode; 58 59 if (dwc->dr_mode == USB_DR_MODE_UNKNOWN) 60 dwc->dr_mode = USB_DR_MODE_OTG; 61 62 mode = dwc->dr_mode; 63 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 64 65 switch (hw_mode) { 66 case DWC3_GHWPARAMS0_MODE_GADGET: 67 if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) { 68 dev_err(dev, 69 "Controller does not support host mode.\n"); 70 return -EINVAL; 71 } 72 mode = USB_DR_MODE_PERIPHERAL; 73 break; 74 case DWC3_GHWPARAMS0_MODE_HOST: 75 if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) { 76 dev_err(dev, 77 "Controller does not support device mode.\n"); 78 return -EINVAL; 79 } 80 mode = USB_DR_MODE_HOST; 81 break; 82 default: 83 if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) 84 mode = USB_DR_MODE_HOST; 85 else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) 86 mode = USB_DR_MODE_PERIPHERAL; 87 88 /* 89 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG 90 * mode. If the controller supports DRD but the dr_mode is not 91 * specified or set to OTG, then set the mode to peripheral. 92 */ 93 if (mode == USB_DR_MODE_OTG && !dwc->edev && 94 (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) || 95 !device_property_read_bool(dwc->dev, "usb-role-switch")) && 96 !DWC3_VER_IS_PRIOR(DWC3, 330A)) 97 mode = USB_DR_MODE_PERIPHERAL; 98 } 99 100 if (mode != dwc->dr_mode) { 101 dev_warn(dev, 102 "Configuration mismatch. dr_mode forced to %s\n", 103 mode == USB_DR_MODE_HOST ? "host" : "gadget"); 104 105 dwc->dr_mode = mode; 106 } 107 108 return 0; 109 } 110 111 void dwc3_enable_susphy(struct dwc3 *dwc, bool enable) 112 { 113 u32 reg; 114 int i; 115 116 for (i = 0; i < dwc->num_usb3_ports; i++) { 117 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(i)); 118 if (enable && !dwc->dis_u3_susphy_quirk) 119 reg |= DWC3_GUSB3PIPECTL_SUSPHY; 120 else 121 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; 122 123 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(i), reg); 124 } 125 126 for (i = 0; i < dwc->num_usb2_ports; i++) { 127 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i)); 128 if (enable && !dwc->dis_u2_susphy_quirk) 129 reg |= DWC3_GUSB2PHYCFG_SUSPHY; 130 else 131 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 132 133 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg); 134 } 135 } 136 137 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy) 138 { 139 unsigned int hw_mode; 140 u32 reg; 141 142 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 143 144 /* 145 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and 146 * GUSB2PHYCFG.SUSPHY should be cleared during mode switching, 147 * and they can be set after core initialization. 148 */ 149 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 150 if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) { 151 if (DWC3_GCTL_PRTCAP(reg) != mode) 152 dwc3_enable_susphy(dwc, false); 153 } 154 155 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); 156 reg |= DWC3_GCTL_PRTCAPDIR(mode); 157 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 158 159 dwc->current_dr_role = mode; 160 trace_dwc3_set_prtcap(mode); 161 } 162 163 static void __dwc3_set_mode(struct work_struct *work) 164 { 165 struct dwc3 *dwc = work_to_dwc(work); 166 unsigned long flags; 167 int ret; 168 u32 reg; 169 u32 desired_dr_role; 170 int i; 171 172 mutex_lock(&dwc->mutex); 173 spin_lock_irqsave(&dwc->lock, flags); 174 desired_dr_role = dwc->desired_dr_role; 175 spin_unlock_irqrestore(&dwc->lock, flags); 176 177 pm_runtime_get_sync(dwc->dev); 178 179 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) 180 dwc3_otg_update(dwc, 0); 181 182 if (!desired_dr_role) 183 goto out; 184 185 if (desired_dr_role == dwc->current_dr_role) 186 goto out; 187 188 if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) 189 goto out; 190 191 switch (dwc->current_dr_role) { 192 case DWC3_GCTL_PRTCAP_HOST: 193 dwc3_host_exit(dwc); 194 break; 195 case DWC3_GCTL_PRTCAP_DEVICE: 196 dwc3_gadget_exit(dwc); 197 dwc3_event_buffers_cleanup(dwc); 198 break; 199 case DWC3_GCTL_PRTCAP_OTG: 200 dwc3_otg_exit(dwc); 201 spin_lock_irqsave(&dwc->lock, flags); 202 dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE; 203 spin_unlock_irqrestore(&dwc->lock, flags); 204 dwc3_otg_update(dwc, 1); 205 break; 206 default: 207 break; 208 } 209 210 /* 211 * When current_dr_role is not set, there's no role switching. 212 * Only perform GCTL.CoreSoftReset when there's DRD role switching. 213 */ 214 if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) || 215 DWC3_VER_IS_PRIOR(DWC31, 190A)) && 216 desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { 217 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 218 reg |= DWC3_GCTL_CORESOFTRESET; 219 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 220 221 /* 222 * Wait for internal clocks to synchronized. DWC_usb31 and 223 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To 224 * keep it consistent across different IPs, let's wait up to 225 * 100ms before clearing GCTL.CORESOFTRESET. 226 */ 227 msleep(100); 228 229 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 230 reg &= ~DWC3_GCTL_CORESOFTRESET; 231 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 232 } 233 234 spin_lock_irqsave(&dwc->lock, flags); 235 236 dwc3_set_prtcap(dwc, desired_dr_role, false); 237 238 spin_unlock_irqrestore(&dwc->lock, flags); 239 240 switch (desired_dr_role) { 241 case DWC3_GCTL_PRTCAP_HOST: 242 ret = dwc3_host_init(dwc); 243 if (ret) { 244 dev_err(dwc->dev, "failed to initialize host\n"); 245 } else { 246 if (dwc->usb2_phy) 247 otg_set_vbus(dwc->usb2_phy->otg, true); 248 249 for (i = 0; i < dwc->num_usb2_ports; i++) 250 phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST); 251 for (i = 0; i < dwc->num_usb3_ports; i++) 252 phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST); 253 254 if (dwc->dis_split_quirk) { 255 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); 256 reg |= DWC3_GUCTL3_SPLITDISABLE; 257 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); 258 } 259 } 260 break; 261 case DWC3_GCTL_PRTCAP_DEVICE: 262 dwc3_core_soft_reset(dwc); 263 264 dwc3_event_buffers_setup(dwc); 265 266 if (dwc->usb2_phy) 267 otg_set_vbus(dwc->usb2_phy->otg, false); 268 phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE); 269 phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE); 270 271 ret = dwc3_gadget_init(dwc); 272 if (ret) 273 dev_err(dwc->dev, "failed to initialize peripheral\n"); 274 break; 275 case DWC3_GCTL_PRTCAP_OTG: 276 dwc3_otg_init(dwc); 277 dwc3_otg_update(dwc, 0); 278 break; 279 default: 280 break; 281 } 282 283 out: 284 pm_runtime_mark_last_busy(dwc->dev); 285 pm_runtime_put_autosuspend(dwc->dev); 286 mutex_unlock(&dwc->mutex); 287 } 288 289 void dwc3_set_mode(struct dwc3 *dwc, u32 mode) 290 { 291 unsigned long flags; 292 293 if (dwc->dr_mode != USB_DR_MODE_OTG) 294 return; 295 296 spin_lock_irqsave(&dwc->lock, flags); 297 dwc->desired_dr_role = mode; 298 spin_unlock_irqrestore(&dwc->lock, flags); 299 300 queue_work(system_freezable_wq, &dwc->drd_work); 301 } 302 303 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) 304 { 305 struct dwc3 *dwc = dep->dwc; 306 u32 reg; 307 308 dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE, 309 DWC3_GDBGFIFOSPACE_NUM(dep->number) | 310 DWC3_GDBGFIFOSPACE_TYPE(type)); 311 312 reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE); 313 314 return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg); 315 } 316 317 /** 318 * dwc3_core_soft_reset - Issues core soft reset and PHY reset 319 * @dwc: pointer to our context structure 320 */ 321 int dwc3_core_soft_reset(struct dwc3 *dwc) 322 { 323 u32 reg; 324 int retries = 1000; 325 326 /* 327 * We're resetting only the device side because, if we're in host mode, 328 * XHCI driver will reset the host block. If dwc3 was configured for 329 * host-only mode, then we can return early. 330 */ 331 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) 332 return 0; 333 334 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 335 reg |= DWC3_DCTL_CSFTRST; 336 reg &= ~DWC3_DCTL_RUN_STOP; 337 dwc3_gadget_dctl_write_safe(dwc, reg); 338 339 /* 340 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit 341 * is cleared only after all the clocks are synchronized. This can 342 * take a little more than 50ms. Set the polling rate at 20ms 343 * for 10 times instead. 344 */ 345 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32)) 346 retries = 10; 347 348 do { 349 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 350 if (!(reg & DWC3_DCTL_CSFTRST)) 351 goto done; 352 353 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32)) 354 msleep(20); 355 else 356 udelay(1); 357 } while (--retries); 358 359 dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n"); 360 return -ETIMEDOUT; 361 362 done: 363 /* 364 * For DWC_usb31 controller 1.80a and prior, once DCTL.CSFRST bit 365 * is cleared, we must wait at least 50ms before accessing the PHY 366 * domain (synchronization delay). 367 */ 368 if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A)) 369 msleep(50); 370 371 return 0; 372 } 373 374 /* 375 * dwc3_frame_length_adjustment - Adjusts frame length if required 376 * @dwc3: Pointer to our controller context structure 377 */ 378 static void dwc3_frame_length_adjustment(struct dwc3 *dwc) 379 { 380 u32 reg; 381 u32 dft; 382 383 if (DWC3_VER_IS_PRIOR(DWC3, 250A)) 384 return; 385 386 if (dwc->fladj == 0) 387 return; 388 389 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); 390 dft = reg & DWC3_GFLADJ_30MHZ_MASK; 391 if (dft != dwc->fladj) { 392 reg &= ~DWC3_GFLADJ_30MHZ_MASK; 393 reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj; 394 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); 395 } 396 } 397 398 /** 399 * dwc3_ref_clk_period - Reference clock period configuration 400 * Default reference clock period depends on hardware 401 * configuration. For systems with reference clock that differs 402 * from the default, this will set clock period in DWC3_GUCTL 403 * register. 404 * @dwc: Pointer to our controller context structure 405 */ 406 static void dwc3_ref_clk_period(struct dwc3 *dwc) 407 { 408 unsigned long period; 409 unsigned long fladj; 410 unsigned long decr; 411 unsigned long rate; 412 u32 reg; 413 414 if (dwc->ref_clk) { 415 rate = clk_get_rate(dwc->ref_clk); 416 if (!rate) 417 return; 418 period = NSEC_PER_SEC / rate; 419 } else if (dwc->ref_clk_per) { 420 period = dwc->ref_clk_per; 421 rate = NSEC_PER_SEC / period; 422 } else { 423 return; 424 } 425 426 reg = dwc3_readl(dwc->regs, DWC3_GUCTL); 427 reg &= ~DWC3_GUCTL_REFCLKPER_MASK; 428 reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period); 429 dwc3_writel(dwc->regs, DWC3_GUCTL, reg); 430 431 if (DWC3_VER_IS_PRIOR(DWC3, 250A)) 432 return; 433 434 /* 435 * The calculation below is 436 * 437 * 125000 * (NSEC_PER_SEC / (rate * period) - 1) 438 * 439 * but rearranged for fixed-point arithmetic. The division must be 440 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and 441 * neither does rate * period). 442 * 443 * Note that rate * period ~= NSEC_PER_SECOND, minus the number of 444 * nanoseconds of error caused by the truncation which happened during 445 * the division when calculating rate or period (whichever one was 446 * derived from the other). We first calculate the relative error, then 447 * scale it to units of 8 ppm. 448 */ 449 fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period); 450 fladj -= 125000; 451 452 /* 453 * The documented 240MHz constant is scaled by 2 to get PLS1 as well. 454 */ 455 decr = 480000000 / rate; 456 457 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); 458 reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK 459 & ~DWC3_GFLADJ_240MHZDECR 460 & ~DWC3_GFLADJ_240MHZDECR_PLS1; 461 reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj) 462 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1) 463 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1); 464 465 if (dwc->gfladj_refclk_lpm_sel) 466 reg |= DWC3_GFLADJ_REFCLK_LPM_SEL; 467 468 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); 469 } 470 471 /** 472 * dwc3_free_one_event_buffer - Frees one event buffer 473 * @dwc: Pointer to our controller context structure 474 * @evt: Pointer to event buffer to be freed 475 */ 476 static void dwc3_free_one_event_buffer(struct dwc3 *dwc, 477 struct dwc3_event_buffer *evt) 478 { 479 dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma); 480 } 481 482 /** 483 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure 484 * @dwc: Pointer to our controller context structure 485 * @length: size of the event buffer 486 * 487 * Returns a pointer to the allocated event buffer structure on success 488 * otherwise ERR_PTR(errno). 489 */ 490 static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc, 491 unsigned int length) 492 { 493 struct dwc3_event_buffer *evt; 494 495 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL); 496 if (!evt) 497 return ERR_PTR(-ENOMEM); 498 499 evt->dwc = dwc; 500 evt->length = length; 501 evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL); 502 if (!evt->cache) 503 return ERR_PTR(-ENOMEM); 504 505 evt->buf = dma_alloc_coherent(dwc->sysdev, length, 506 &evt->dma, GFP_KERNEL); 507 if (!evt->buf) 508 return ERR_PTR(-ENOMEM); 509 510 return evt; 511 } 512 513 /** 514 * dwc3_free_event_buffers - frees all allocated event buffers 515 * @dwc: Pointer to our controller context structure 516 */ 517 static void dwc3_free_event_buffers(struct dwc3 *dwc) 518 { 519 struct dwc3_event_buffer *evt; 520 521 evt = dwc->ev_buf; 522 if (evt) 523 dwc3_free_one_event_buffer(dwc, evt); 524 } 525 526 /** 527 * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length 528 * @dwc: pointer to our controller context structure 529 * @length: size of event buffer 530 * 531 * Returns 0 on success otherwise negative errno. In the error case, dwc 532 * may contain some buffers allocated but not all which were requested. 533 */ 534 static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length) 535 { 536 struct dwc3_event_buffer *evt; 537 unsigned int hw_mode; 538 539 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 540 if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) { 541 dwc->ev_buf = NULL; 542 return 0; 543 } 544 545 evt = dwc3_alloc_one_event_buffer(dwc, length); 546 if (IS_ERR(evt)) { 547 dev_err(dwc->dev, "can't allocate event buffer\n"); 548 return PTR_ERR(evt); 549 } 550 dwc->ev_buf = evt; 551 552 return 0; 553 } 554 555 /** 556 * dwc3_event_buffers_setup - setup our allocated event buffers 557 * @dwc: pointer to our controller context structure 558 * 559 * Returns 0 on success otherwise negative errno. 560 */ 561 int dwc3_event_buffers_setup(struct dwc3 *dwc) 562 { 563 struct dwc3_event_buffer *evt; 564 u32 reg; 565 566 if (!dwc->ev_buf) 567 return 0; 568 569 evt = dwc->ev_buf; 570 evt->lpos = 0; 571 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 572 lower_32_bits(evt->dma)); 573 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 574 upper_32_bits(evt->dma)); 575 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), 576 DWC3_GEVNTSIZ_SIZE(evt->length)); 577 578 /* Clear any stale event */ 579 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 580 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg); 581 return 0; 582 } 583 584 void dwc3_event_buffers_cleanup(struct dwc3 *dwc) 585 { 586 struct dwc3_event_buffer *evt; 587 u32 reg; 588 589 if (!dwc->ev_buf) 590 return; 591 /* 592 * Exynos platforms may not be able to access event buffer if the 593 * controller failed to halt on dwc3_core_exit(). 594 */ 595 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 596 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 597 return; 598 599 evt = dwc->ev_buf; 600 601 evt->lpos = 0; 602 603 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0); 604 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0); 605 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK 606 | DWC3_GEVNTSIZ_SIZE(0)); 607 608 /* Clear any stale event */ 609 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 610 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg); 611 } 612 613 static void dwc3_core_num_eps(struct dwc3 *dwc) 614 { 615 struct dwc3_hwparams *parms = &dwc->hwparams; 616 617 dwc->num_eps = DWC3_NUM_EPS(parms); 618 } 619 620 static void dwc3_cache_hwparams(struct dwc3 *dwc) 621 { 622 struct dwc3_hwparams *parms = &dwc->hwparams; 623 624 parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0); 625 parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1); 626 parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2); 627 parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3); 628 parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4); 629 parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5); 630 parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6); 631 parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7); 632 parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8); 633 634 if (DWC3_IP_IS(DWC32)) 635 parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9); 636 } 637 638 static void dwc3_config_soc_bus(struct dwc3 *dwc) 639 { 640 if (dwc->gsbuscfg0_reqinfo != DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED) { 641 u32 reg; 642 643 reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0); 644 reg &= ~DWC3_GSBUSCFG0_REQINFO(~0); 645 reg |= DWC3_GSBUSCFG0_REQINFO(dwc->gsbuscfg0_reqinfo); 646 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg); 647 } 648 } 649 650 static int dwc3_core_ulpi_init(struct dwc3 *dwc) 651 { 652 int intf; 653 int ret = 0; 654 655 intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3); 656 657 if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI || 658 (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI && 659 dwc->hsphy_interface && 660 !strncmp(dwc->hsphy_interface, "ulpi", 4))) 661 ret = dwc3_ulpi_init(dwc); 662 663 return ret; 664 } 665 666 static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index) 667 { 668 u32 reg; 669 670 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(index)); 671 672 /* 673 * Make sure UX_EXIT_PX is cleared as that causes issues with some 674 * PHYs. Also, this bit is not supposed to be used in normal operation. 675 */ 676 reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX; 677 678 /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */ 679 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; 680 681 if (dwc->u2ss_inp3_quirk) 682 reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK; 683 684 if (dwc->dis_rxdet_inp3_quirk) 685 reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3; 686 687 if (dwc->req_p1p2p3_quirk) 688 reg |= DWC3_GUSB3PIPECTL_REQP1P2P3; 689 690 if (dwc->del_p1p2p3_quirk) 691 reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN; 692 693 if (dwc->del_phy_power_chg_quirk) 694 reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE; 695 696 if (dwc->lfps_filter_quirk) 697 reg |= DWC3_GUSB3PIPECTL_LFPSFILT; 698 699 if (dwc->rx_detect_poll_quirk) 700 reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL; 701 702 if (dwc->tx_de_emphasis_quirk) 703 reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis); 704 705 if (dwc->dis_del_phy_power_chg_quirk) 706 reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE; 707 708 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(index), reg); 709 710 return 0; 711 } 712 713 static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index) 714 { 715 u32 reg; 716 717 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(index)); 718 719 /* Select the HS PHY interface */ 720 switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) { 721 case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI: 722 if (dwc->hsphy_interface && 723 !strncmp(dwc->hsphy_interface, "utmi", 4)) { 724 reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI; 725 break; 726 } else if (dwc->hsphy_interface && 727 !strncmp(dwc->hsphy_interface, "ulpi", 4)) { 728 reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI; 729 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg); 730 } else { 731 /* Relying on default value. */ 732 if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI)) 733 break; 734 } 735 fallthrough; 736 case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: 737 default: 738 break; 739 } 740 741 switch (dwc->hsphy_mode) { 742 case USBPHY_INTERFACE_MODE_UTMI: 743 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK | 744 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK); 745 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) | 746 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT); 747 break; 748 case USBPHY_INTERFACE_MODE_UTMIW: 749 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK | 750 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK); 751 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) | 752 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT); 753 break; 754 default: 755 break; 756 } 757 758 /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */ 759 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 760 761 if (dwc->dis_enblslpm_quirk) 762 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; 763 else 764 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM; 765 766 if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel) 767 reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS; 768 769 /* 770 * Some ULPI USB PHY does not support internal VBUS supply, to drive 771 * the CPEN pin requires the configuration of the ULPI DRVVBUSEXTERNAL 772 * bit of OTG_CTRL register. Controller configures the USB2 PHY 773 * ULPIEXTVBUSDRV bit[17] of the GUSB2PHYCFG register to drive vBus 774 * with an external supply. 775 */ 776 if (dwc->ulpi_ext_vbus_drv) 777 reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV; 778 779 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg); 780 781 return 0; 782 } 783 784 /** 785 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core 786 * @dwc: Pointer to our controller context structure 787 * 788 * Returns 0 on success. The USB PHY interfaces are configured but not 789 * initialized. The PHY interfaces and the PHYs get initialized together with 790 * the core in dwc3_core_init. 791 */ 792 static int dwc3_phy_setup(struct dwc3 *dwc) 793 { 794 int i; 795 int ret; 796 797 for (i = 0; i < dwc->num_usb3_ports; i++) { 798 ret = dwc3_ss_phy_setup(dwc, i); 799 if (ret) 800 return ret; 801 } 802 803 for (i = 0; i < dwc->num_usb2_ports; i++) { 804 ret = dwc3_hs_phy_setup(dwc, i); 805 if (ret) 806 return ret; 807 } 808 809 return 0; 810 } 811 812 static int dwc3_phy_init(struct dwc3 *dwc) 813 { 814 int ret; 815 int i; 816 int j; 817 818 usb_phy_init(dwc->usb2_phy); 819 usb_phy_init(dwc->usb3_phy); 820 821 for (i = 0; i < dwc->num_usb2_ports; i++) { 822 ret = phy_init(dwc->usb2_generic_phy[i]); 823 if (ret < 0) 824 goto err_exit_usb2_phy; 825 } 826 827 for (j = 0; j < dwc->num_usb3_ports; j++) { 828 ret = phy_init(dwc->usb3_generic_phy[j]); 829 if (ret < 0) 830 goto err_exit_usb3_phy; 831 } 832 833 /* 834 * Above DWC_usb3.0 1.94a, it is recommended to set 835 * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during 836 * coreConsultant configuration. So default value will be '0' when the 837 * core is reset. Application needs to set it to '1' after the core 838 * initialization is completed. 839 * 840 * Certain phy requires to be in P0 power state during initialization. 841 * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear 842 * prior to phy init to maintain in the P0 state. 843 * 844 * After phy initialization, some phy operations can only be executed 845 * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and 846 * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid 847 * blocking phy ops. 848 */ 849 if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) 850 dwc3_enable_susphy(dwc, true); 851 852 return 0; 853 854 err_exit_usb3_phy: 855 while (--j >= 0) 856 phy_exit(dwc->usb3_generic_phy[j]); 857 858 err_exit_usb2_phy: 859 while (--i >= 0) 860 phy_exit(dwc->usb2_generic_phy[i]); 861 862 usb_phy_shutdown(dwc->usb3_phy); 863 usb_phy_shutdown(dwc->usb2_phy); 864 865 return ret; 866 } 867 868 static void dwc3_phy_exit(struct dwc3 *dwc) 869 { 870 int i; 871 872 for (i = 0; i < dwc->num_usb3_ports; i++) 873 phy_exit(dwc->usb3_generic_phy[i]); 874 875 for (i = 0; i < dwc->num_usb2_ports; i++) 876 phy_exit(dwc->usb2_generic_phy[i]); 877 878 usb_phy_shutdown(dwc->usb3_phy); 879 usb_phy_shutdown(dwc->usb2_phy); 880 } 881 882 static int dwc3_phy_power_on(struct dwc3 *dwc) 883 { 884 int ret; 885 int i; 886 int j; 887 888 usb_phy_set_suspend(dwc->usb2_phy, 0); 889 usb_phy_set_suspend(dwc->usb3_phy, 0); 890 891 for (i = 0; i < dwc->num_usb2_ports; i++) { 892 ret = phy_power_on(dwc->usb2_generic_phy[i]); 893 if (ret < 0) 894 goto err_power_off_usb2_phy; 895 } 896 897 for (j = 0; j < dwc->num_usb3_ports; j++) { 898 ret = phy_power_on(dwc->usb3_generic_phy[j]); 899 if (ret < 0) 900 goto err_power_off_usb3_phy; 901 } 902 903 return 0; 904 905 err_power_off_usb3_phy: 906 while (--j >= 0) 907 phy_power_off(dwc->usb3_generic_phy[j]); 908 909 err_power_off_usb2_phy: 910 while (--i >= 0) 911 phy_power_off(dwc->usb2_generic_phy[i]); 912 913 usb_phy_set_suspend(dwc->usb3_phy, 1); 914 usb_phy_set_suspend(dwc->usb2_phy, 1); 915 916 return ret; 917 } 918 919 static void dwc3_phy_power_off(struct dwc3 *dwc) 920 { 921 int i; 922 923 for (i = 0; i < dwc->num_usb3_ports; i++) 924 phy_power_off(dwc->usb3_generic_phy[i]); 925 926 for (i = 0; i < dwc->num_usb2_ports; i++) 927 phy_power_off(dwc->usb2_generic_phy[i]); 928 929 usb_phy_set_suspend(dwc->usb3_phy, 1); 930 usb_phy_set_suspend(dwc->usb2_phy, 1); 931 } 932 933 static int dwc3_clk_enable(struct dwc3 *dwc) 934 { 935 int ret; 936 937 ret = clk_prepare_enable(dwc->bus_clk); 938 if (ret) 939 return ret; 940 941 ret = clk_prepare_enable(dwc->ref_clk); 942 if (ret) 943 goto disable_bus_clk; 944 945 ret = clk_prepare_enable(dwc->susp_clk); 946 if (ret) 947 goto disable_ref_clk; 948 949 ret = clk_prepare_enable(dwc->utmi_clk); 950 if (ret) 951 goto disable_susp_clk; 952 953 ret = clk_prepare_enable(dwc->pipe_clk); 954 if (ret) 955 goto disable_utmi_clk; 956 957 return 0; 958 959 disable_utmi_clk: 960 clk_disable_unprepare(dwc->utmi_clk); 961 disable_susp_clk: 962 clk_disable_unprepare(dwc->susp_clk); 963 disable_ref_clk: 964 clk_disable_unprepare(dwc->ref_clk); 965 disable_bus_clk: 966 clk_disable_unprepare(dwc->bus_clk); 967 return ret; 968 } 969 970 static void dwc3_clk_disable(struct dwc3 *dwc) 971 { 972 clk_disable_unprepare(dwc->pipe_clk); 973 clk_disable_unprepare(dwc->utmi_clk); 974 clk_disable_unprepare(dwc->susp_clk); 975 clk_disable_unprepare(dwc->ref_clk); 976 clk_disable_unprepare(dwc->bus_clk); 977 } 978 979 static void dwc3_core_exit(struct dwc3 *dwc) 980 { 981 dwc3_event_buffers_cleanup(dwc); 982 dwc3_phy_power_off(dwc); 983 dwc3_phy_exit(dwc); 984 dwc3_clk_disable(dwc); 985 reset_control_assert(dwc->reset); 986 } 987 988 static bool dwc3_core_is_valid(struct dwc3 *dwc) 989 { 990 u32 reg; 991 992 reg = dwc3_readl(dwc->regs, DWC3_GSNPSID); 993 dwc->ip = DWC3_GSNPS_ID(reg); 994 995 /* This should read as U3 followed by revision number */ 996 if (DWC3_IP_IS(DWC3)) { 997 dwc->revision = reg; 998 } else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) { 999 dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER); 1000 dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE); 1001 } else { 1002 return false; 1003 } 1004 1005 return true; 1006 } 1007 1008 static void dwc3_core_setup_global_control(struct dwc3 *dwc) 1009 { 1010 unsigned int power_opt; 1011 unsigned int hw_mode; 1012 u32 reg; 1013 1014 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 1015 reg &= ~DWC3_GCTL_SCALEDOWN_MASK; 1016 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 1017 power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1); 1018 1019 switch (power_opt) { 1020 case DWC3_GHWPARAMS1_EN_PWROPT_CLK: 1021 /** 1022 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an 1023 * issue which would cause xHCI compliance tests to fail. 1024 * 1025 * Because of that we cannot enable clock gating on such 1026 * configurations. 1027 * 1028 * Refers to: 1029 * 1030 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based 1031 * SOF/ITP Mode Used 1032 */ 1033 if ((dwc->dr_mode == USB_DR_MODE_HOST || 1034 dwc->dr_mode == USB_DR_MODE_OTG) && 1035 DWC3_VER_IS_WITHIN(DWC3, 210A, 250A)) 1036 reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC; 1037 else 1038 reg &= ~DWC3_GCTL_DSBLCLKGTNG; 1039 break; 1040 case DWC3_GHWPARAMS1_EN_PWROPT_HIB: 1041 /* 1042 * REVISIT Enabling this bit so that host-mode hibernation 1043 * will work. Device-mode hibernation is not yet implemented. 1044 */ 1045 reg |= DWC3_GCTL_GBLHIBERNATIONEN; 1046 break; 1047 default: 1048 /* nothing */ 1049 break; 1050 } 1051 1052 /* 1053 * This is a workaround for STAR#4846132, which only affects 1054 * DWC_usb31 version2.00a operating in host mode. 1055 * 1056 * There is a problem in DWC_usb31 version 2.00a operating 1057 * in host mode that would cause a CSR read timeout When CSR 1058 * read coincides with RAM Clock Gating Entry. By disable 1059 * Clock Gating, sacrificing power consumption for normal 1060 * operation. 1061 */ 1062 if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO && 1063 hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A)) 1064 reg |= DWC3_GCTL_DSBLCLKGTNG; 1065 1066 /* check if current dwc3 is on simulation board */ 1067 if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) { 1068 dev_info(dwc->dev, "Running with FPGA optimizations\n"); 1069 dwc->is_fpga = true; 1070 } 1071 1072 WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga, 1073 "disable_scramble cannot be used on non-FPGA builds\n"); 1074 1075 if (dwc->disable_scramble_quirk && dwc->is_fpga) 1076 reg |= DWC3_GCTL_DISSCRAMBLE; 1077 else 1078 reg &= ~DWC3_GCTL_DISSCRAMBLE; 1079 1080 if (dwc->u2exit_lfps_quirk) 1081 reg |= DWC3_GCTL_U2EXIT_LFPS; 1082 1083 /* 1084 * WORKAROUND: DWC3 revisions <1.90a have a bug 1085 * where the device can fail to connect at SuperSpeed 1086 * and falls back to high-speed mode which causes 1087 * the device to enter a Connect/Disconnect loop 1088 */ 1089 if (DWC3_VER_IS_PRIOR(DWC3, 190A)) 1090 reg |= DWC3_GCTL_U2RSTECN; 1091 1092 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 1093 } 1094 1095 static int dwc3_core_get_phy(struct dwc3 *dwc); 1096 static int dwc3_core_ulpi_init(struct dwc3 *dwc); 1097 1098 /* set global incr burst type configuration registers */ 1099 static void dwc3_set_incr_burst_type(struct dwc3 *dwc) 1100 { 1101 struct device *dev = dwc->dev; 1102 /* incrx_mode : for INCR burst type. */ 1103 bool incrx_mode; 1104 /* incrx_size : for size of INCRX burst. */ 1105 u32 incrx_size; 1106 u32 *vals; 1107 u32 cfg; 1108 int ntype; 1109 int ret; 1110 int i; 1111 1112 cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0); 1113 1114 /* 1115 * Handle property "snps,incr-burst-type-adjustment". 1116 * Get the number of value from this property: 1117 * result <= 0, means this property is not supported. 1118 * result = 1, means INCRx burst mode supported. 1119 * result > 1, means undefined length burst mode supported. 1120 */ 1121 ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment"); 1122 if (ntype <= 0) 1123 return; 1124 1125 vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL); 1126 if (!vals) 1127 return; 1128 1129 /* Get INCR burst type, and parse it */ 1130 ret = device_property_read_u32_array(dev, 1131 "snps,incr-burst-type-adjustment", vals, ntype); 1132 if (ret) { 1133 kfree(vals); 1134 dev_err(dev, "Error to get property\n"); 1135 return; 1136 } 1137 1138 incrx_size = *vals; 1139 1140 if (ntype > 1) { 1141 /* INCRX (undefined length) burst mode */ 1142 incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE; 1143 for (i = 1; i < ntype; i++) { 1144 if (vals[i] > incrx_size) 1145 incrx_size = vals[i]; 1146 } 1147 } else { 1148 /* INCRX burst mode */ 1149 incrx_mode = INCRX_BURST_MODE; 1150 } 1151 1152 kfree(vals); 1153 1154 /* Enable Undefined Length INCR Burst and Enable INCRx Burst */ 1155 cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK; 1156 if (incrx_mode) 1157 cfg |= DWC3_GSBUSCFG0_INCRBRSTENA; 1158 switch (incrx_size) { 1159 case 256: 1160 cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA; 1161 break; 1162 case 128: 1163 cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA; 1164 break; 1165 case 64: 1166 cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA; 1167 break; 1168 case 32: 1169 cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA; 1170 break; 1171 case 16: 1172 cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA; 1173 break; 1174 case 8: 1175 cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA; 1176 break; 1177 case 4: 1178 cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA; 1179 break; 1180 case 1: 1181 break; 1182 default: 1183 dev_err(dev, "Invalid property\n"); 1184 break; 1185 } 1186 1187 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg); 1188 } 1189 1190 static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc) 1191 { 1192 u32 scale; 1193 u32 reg; 1194 1195 if (!dwc->susp_clk) 1196 return; 1197 1198 /* 1199 * The power down scale field specifies how many suspend_clk 1200 * periods fit into a 16KHz clock period. When performing 1201 * the division, round up the remainder. 1202 * 1203 * The power down scale value is calculated using the fastest 1204 * frequency of the suspend_clk. If it isn't fixed (but within 1205 * the accuracy requirement), the driver may not know the max 1206 * rate of the suspend_clk, so only update the power down scale 1207 * if the default is less than the calculated value from 1208 * clk_get_rate() or if the default is questionably high 1209 * (3x or more) to be within the requirement. 1210 */ 1211 scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000); 1212 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 1213 if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) || 1214 (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) { 1215 reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK); 1216 reg |= DWC3_GCTL_PWRDNSCALE(scale); 1217 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 1218 } 1219 } 1220 1221 static void dwc3_config_threshold(struct dwc3 *dwc) 1222 { 1223 u32 reg; 1224 u8 rx_thr_num; 1225 u8 rx_maxburst; 1226 u8 tx_thr_num; 1227 u8 tx_maxburst; 1228 1229 /* 1230 * Must config both number of packets and max burst settings to enable 1231 * RX and/or TX threshold. 1232 */ 1233 if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) { 1234 rx_thr_num = dwc->rx_thr_num_pkt_prd; 1235 rx_maxburst = dwc->rx_max_burst_prd; 1236 tx_thr_num = dwc->tx_thr_num_pkt_prd; 1237 tx_maxburst = dwc->tx_max_burst_prd; 1238 1239 if (rx_thr_num && rx_maxburst) { 1240 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1241 reg |= DWC31_RXTHRNUMPKTSEL_PRD; 1242 1243 reg &= ~DWC31_RXTHRNUMPKT_PRD(~0); 1244 reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num); 1245 1246 reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0); 1247 reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst); 1248 1249 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1250 } 1251 1252 if (tx_thr_num && tx_maxburst) { 1253 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG); 1254 reg |= DWC31_TXTHRNUMPKTSEL_PRD; 1255 1256 reg &= ~DWC31_TXTHRNUMPKT_PRD(~0); 1257 reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num); 1258 1259 reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0); 1260 reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst); 1261 1262 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg); 1263 } 1264 } 1265 1266 rx_thr_num = dwc->rx_thr_num_pkt; 1267 rx_maxburst = dwc->rx_max_burst; 1268 tx_thr_num = dwc->tx_thr_num_pkt; 1269 tx_maxburst = dwc->tx_max_burst; 1270 1271 if (DWC3_IP_IS(DWC3)) { 1272 if (rx_thr_num && rx_maxburst) { 1273 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1274 reg |= DWC3_GRXTHRCFG_PKTCNTSEL; 1275 1276 reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0); 1277 reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num); 1278 1279 reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0); 1280 reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst); 1281 1282 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1283 } 1284 1285 if (tx_thr_num && tx_maxburst) { 1286 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG); 1287 reg |= DWC3_GTXTHRCFG_PKTCNTSEL; 1288 1289 reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0); 1290 reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num); 1291 1292 reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0); 1293 reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst); 1294 1295 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg); 1296 } 1297 } else { 1298 if (rx_thr_num && rx_maxburst) { 1299 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1300 reg |= DWC31_GRXTHRCFG_PKTCNTSEL; 1301 1302 reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0); 1303 reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num); 1304 1305 reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0); 1306 reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst); 1307 1308 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1309 } 1310 1311 if (tx_thr_num && tx_maxburst) { 1312 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG); 1313 reg |= DWC31_GTXTHRCFG_PKTCNTSEL; 1314 1315 reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0); 1316 reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num); 1317 1318 reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0); 1319 reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst); 1320 1321 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg); 1322 } 1323 } 1324 } 1325 1326 /** 1327 * dwc3_core_init - Low-level initialization of DWC3 Core 1328 * @dwc: Pointer to our controller context structure 1329 * 1330 * Returns 0 on success otherwise negative errno. 1331 */ 1332 static int dwc3_core_init(struct dwc3 *dwc) 1333 { 1334 unsigned int hw_mode; 1335 u32 reg; 1336 int ret; 1337 1338 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 1339 1340 /* 1341 * Write Linux Version Code to our GUID register so it's easy to figure 1342 * out which kernel version a bug was found. 1343 */ 1344 dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); 1345 1346 ret = dwc3_phy_setup(dwc); 1347 if (ret) 1348 return ret; 1349 1350 if (!dwc->ulpi_ready) { 1351 ret = dwc3_core_ulpi_init(dwc); 1352 if (ret) { 1353 if (ret == -ETIMEDOUT) { 1354 dwc3_core_soft_reset(dwc); 1355 ret = -EPROBE_DEFER; 1356 } 1357 return ret; 1358 } 1359 dwc->ulpi_ready = true; 1360 } 1361 1362 if (!dwc->phys_ready) { 1363 ret = dwc3_core_get_phy(dwc); 1364 if (ret) 1365 goto err_exit_ulpi; 1366 dwc->phys_ready = true; 1367 } 1368 1369 ret = dwc3_phy_init(dwc); 1370 if (ret) 1371 goto err_exit_ulpi; 1372 1373 ret = dwc3_core_soft_reset(dwc); 1374 if (ret) 1375 goto err_exit_phy; 1376 1377 dwc3_core_setup_global_control(dwc); 1378 dwc3_core_num_eps(dwc); 1379 1380 /* Set power down scale of suspend_clk */ 1381 dwc3_set_power_down_clk_scale(dwc); 1382 1383 /* Adjust Frame Length */ 1384 dwc3_frame_length_adjustment(dwc); 1385 1386 /* Adjust Reference Clock Period */ 1387 dwc3_ref_clk_period(dwc); 1388 1389 dwc3_set_incr_burst_type(dwc); 1390 1391 dwc3_config_soc_bus(dwc); 1392 1393 ret = dwc3_phy_power_on(dwc); 1394 if (ret) 1395 goto err_exit_phy; 1396 1397 ret = dwc3_event_buffers_setup(dwc); 1398 if (ret) { 1399 dev_err(dwc->dev, "failed to setup event buffers\n"); 1400 goto err_power_off_phy; 1401 } 1402 1403 /* 1404 * ENDXFER polling is available on version 3.10a and later of 1405 * the DWC_usb3 controller. It is NOT available in the 1406 * DWC_usb31 controller. 1407 */ 1408 if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) { 1409 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); 1410 reg |= DWC3_GUCTL2_RST_ACTBITLATER; 1411 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); 1412 } 1413 1414 /* 1415 * STAR 9001285599: This issue affects DWC_usb3 version 3.20a 1416 * only. If the PM TIMER ECM is enabled through GUCTL2[19], the 1417 * link compliance test (TD7.21) may fail. If the ECN is not 1418 * enabled (GUCTL2[19] = 0), the controller will use the old timer 1419 * value (5us), which is still acceptable for the link compliance 1420 * test. Therefore, do not enable PM TIMER ECM in 3.20a by 1421 * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0. 1422 */ 1423 if (DWC3_VER_IS(DWC3, 320A)) { 1424 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); 1425 reg &= ~DWC3_GUCTL2_LC_TIMER; 1426 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); 1427 } 1428 1429 /* 1430 * When configured in HOST mode, after issuing U3/L2 exit controller 1431 * fails to send proper CRC checksum in CRC5 field. Because of this 1432 * behaviour Transaction Error is generated, resulting in reset and 1433 * re-enumeration of usb device attached. All the termsel, xcvrsel, 1434 * opmode becomes 0 during end of resume. Enabling bit 10 of GUCTL1 1435 * will correct this problem. This option is to support certain 1436 * legacy ULPI PHYs. 1437 */ 1438 if (dwc->resume_hs_terminations) { 1439 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1); 1440 reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST; 1441 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); 1442 } 1443 1444 if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) { 1445 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1); 1446 1447 /* 1448 * Enable hardware control of sending remote wakeup 1449 * in HS when the device is in the L1 state. 1450 */ 1451 if (!DWC3_VER_IS_PRIOR(DWC3, 290A)) 1452 reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW; 1453 1454 /* 1455 * Decouple USB 2.0 L1 & L2 events which will allow for 1456 * gadget driver to only receive U3/L2 suspend & wakeup 1457 * events and prevent the more frequent L1 LPM transitions 1458 * from interrupting the driver. 1459 */ 1460 if (!DWC3_VER_IS_PRIOR(DWC3, 300A)) 1461 reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT; 1462 1463 if (dwc->dis_tx_ipgap_linecheck_quirk) 1464 reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; 1465 1466 if (dwc->parkmode_disable_ss_quirk) 1467 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; 1468 1469 if (dwc->parkmode_disable_hs_quirk) 1470 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS; 1471 1472 if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY)) { 1473 if (dwc->maximum_speed == USB_SPEED_FULL || 1474 dwc->maximum_speed == USB_SPEED_HIGH) 1475 reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK; 1476 else 1477 reg &= ~DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK; 1478 } 1479 1480 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); 1481 } 1482 1483 dwc3_config_threshold(dwc); 1484 1485 /* 1486 * Modify this for all supported Super Speed ports when 1487 * multiport support is added. 1488 */ 1489 if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && 1490 (DWC3_IP_IS(DWC31)) && 1491 dwc->maximum_speed == USB_SPEED_SUPER) { 1492 int i; 1493 1494 for (i = 0; i < dwc->num_usb3_ports; i++) { 1495 reg = dwc3_readl(dwc->regs, DWC3_LLUCTL(i)); 1496 reg |= DWC3_LLUCTL_FORCE_GEN1; 1497 dwc3_writel(dwc->regs, DWC3_LLUCTL(i), reg); 1498 } 1499 } 1500 1501 /* 1502 * STAR 9001346572: This issue affects DWC_usb31 versions 1.80a and 1503 * prior. When an active endpoint not currently cached in the host 1504 * controller is chosen to be cached to the same index as an endpoint 1505 * receiving NAKs, the endpoint receiving NAKs enters continuous 1506 * retry mode. This prevents it from being evicted from the host 1507 * controller cache, blocking the new endpoint from being cached and 1508 * serviced. 1509 * 1510 * To resolve this, for controller versions 1.70a and 1.80a, set the 1511 * GUCTL3 bit[16] (USB2.0 Internal Retry Disable) to 1. This bit 1512 * disables the USB2.0 internal retry feature. The GUCTL3[16] register 1513 * function is available only from version 1.70a. 1514 */ 1515 if (DWC3_VER_IS_WITHIN(DWC31, 170A, 180A)) { 1516 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); 1517 reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE; 1518 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); 1519 } 1520 1521 return 0; 1522 1523 err_power_off_phy: 1524 dwc3_phy_power_off(dwc); 1525 err_exit_phy: 1526 dwc3_phy_exit(dwc); 1527 err_exit_ulpi: 1528 dwc3_ulpi_exit(dwc); 1529 1530 return ret; 1531 } 1532 1533 static int dwc3_core_get_phy(struct dwc3 *dwc) 1534 { 1535 struct device *dev = dwc->dev; 1536 struct device_node *node = dev->of_node; 1537 char phy_name[9]; 1538 int ret; 1539 u8 i; 1540 1541 if (node) { 1542 dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0); 1543 dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1); 1544 } else { 1545 dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); 1546 dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3); 1547 } 1548 1549 if (IS_ERR(dwc->usb2_phy)) { 1550 ret = PTR_ERR(dwc->usb2_phy); 1551 if (ret == -ENXIO || ret == -ENODEV) 1552 dwc->usb2_phy = NULL; 1553 else 1554 return dev_err_probe(dev, ret, "no usb2 phy configured\n"); 1555 } 1556 1557 if (IS_ERR(dwc->usb3_phy)) { 1558 ret = PTR_ERR(dwc->usb3_phy); 1559 if (ret == -ENXIO || ret == -ENODEV) 1560 dwc->usb3_phy = NULL; 1561 else 1562 return dev_err_probe(dev, ret, "no usb3 phy configured\n"); 1563 } 1564 1565 for (i = 0; i < dwc->num_usb2_ports; i++) { 1566 if (dwc->num_usb2_ports == 1) 1567 snprintf(phy_name, sizeof(phy_name), "usb2-phy"); 1568 else 1569 snprintf(phy_name, sizeof(phy_name), "usb2-%u", i); 1570 1571 dwc->usb2_generic_phy[i] = devm_phy_get(dev, phy_name); 1572 if (IS_ERR(dwc->usb2_generic_phy[i])) { 1573 ret = PTR_ERR(dwc->usb2_generic_phy[i]); 1574 if (ret == -ENOSYS || ret == -ENODEV) 1575 dwc->usb2_generic_phy[i] = NULL; 1576 else 1577 return dev_err_probe(dev, ret, "failed to lookup phy %s\n", 1578 phy_name); 1579 } 1580 } 1581 1582 for (i = 0; i < dwc->num_usb3_ports; i++) { 1583 if (dwc->num_usb3_ports == 1) 1584 snprintf(phy_name, sizeof(phy_name), "usb3-phy"); 1585 else 1586 snprintf(phy_name, sizeof(phy_name), "usb3-%u", i); 1587 1588 dwc->usb3_generic_phy[i] = devm_phy_get(dev, phy_name); 1589 if (IS_ERR(dwc->usb3_generic_phy[i])) { 1590 ret = PTR_ERR(dwc->usb3_generic_phy[i]); 1591 if (ret == -ENOSYS || ret == -ENODEV) 1592 dwc->usb3_generic_phy[i] = NULL; 1593 else 1594 return dev_err_probe(dev, ret, "failed to lookup phy %s\n", 1595 phy_name); 1596 } 1597 } 1598 1599 return 0; 1600 } 1601 1602 static int dwc3_core_init_mode(struct dwc3 *dwc) 1603 { 1604 struct device *dev = dwc->dev; 1605 int ret; 1606 int i; 1607 1608 switch (dwc->dr_mode) { 1609 case USB_DR_MODE_PERIPHERAL: 1610 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false); 1611 1612 if (dwc->usb2_phy) 1613 otg_set_vbus(dwc->usb2_phy->otg, false); 1614 phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE); 1615 phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE); 1616 1617 ret = dwc3_gadget_init(dwc); 1618 if (ret) 1619 return dev_err_probe(dev, ret, "failed to initialize gadget\n"); 1620 break; 1621 case USB_DR_MODE_HOST: 1622 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false); 1623 1624 if (dwc->usb2_phy) 1625 otg_set_vbus(dwc->usb2_phy->otg, true); 1626 for (i = 0; i < dwc->num_usb2_ports; i++) 1627 phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST); 1628 for (i = 0; i < dwc->num_usb3_ports; i++) 1629 phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST); 1630 1631 ret = dwc3_host_init(dwc); 1632 if (ret) 1633 return dev_err_probe(dev, ret, "failed to initialize host\n"); 1634 break; 1635 case USB_DR_MODE_OTG: 1636 INIT_WORK(&dwc->drd_work, __dwc3_set_mode); 1637 ret = dwc3_drd_init(dwc); 1638 if (ret) 1639 return dev_err_probe(dev, ret, "failed to initialize dual-role\n"); 1640 break; 1641 default: 1642 dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode); 1643 return -EINVAL; 1644 } 1645 1646 return 0; 1647 } 1648 1649 static void dwc3_core_exit_mode(struct dwc3 *dwc) 1650 { 1651 switch (dwc->dr_mode) { 1652 case USB_DR_MODE_PERIPHERAL: 1653 dwc3_gadget_exit(dwc); 1654 break; 1655 case USB_DR_MODE_HOST: 1656 dwc3_host_exit(dwc); 1657 break; 1658 case USB_DR_MODE_OTG: 1659 dwc3_drd_exit(dwc); 1660 break; 1661 default: 1662 /* do nothing */ 1663 break; 1664 } 1665 1666 /* de-assert DRVVBUS for HOST and OTG mode */ 1667 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); 1668 } 1669 1670 static void dwc3_get_software_properties(struct dwc3 *dwc) 1671 { 1672 struct device *tmpdev; 1673 u16 gsbuscfg0_reqinfo; 1674 int ret; 1675 1676 dwc->gsbuscfg0_reqinfo = DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED; 1677 1678 /* 1679 * Iterate over all parent nodes for finding swnode properties 1680 * and non-DT (non-ABI) properties. 1681 */ 1682 for (tmpdev = dwc->dev; tmpdev; tmpdev = tmpdev->parent) { 1683 ret = device_property_read_u16(tmpdev, 1684 "snps,gsbuscfg0-reqinfo", 1685 &gsbuscfg0_reqinfo); 1686 if (!ret) 1687 dwc->gsbuscfg0_reqinfo = gsbuscfg0_reqinfo; 1688 } 1689 } 1690 1691 static void dwc3_get_properties(struct dwc3 *dwc) 1692 { 1693 struct device *dev = dwc->dev; 1694 u8 lpm_nyet_threshold; 1695 u8 tx_de_emphasis; 1696 u8 hird_threshold; 1697 u8 rx_thr_num_pkt = 0; 1698 u8 rx_max_burst = 0; 1699 u8 tx_thr_num_pkt = 0; 1700 u8 tx_max_burst = 0; 1701 u8 rx_thr_num_pkt_prd = 0; 1702 u8 rx_max_burst_prd = 0; 1703 u8 tx_thr_num_pkt_prd = 0; 1704 u8 tx_max_burst_prd = 0; 1705 u8 tx_fifo_resize_max_num; 1706 u16 num_hc_interrupters; 1707 1708 /* default to highest possible threshold */ 1709 lpm_nyet_threshold = 0xf; 1710 1711 /* default to -3.5dB de-emphasis */ 1712 tx_de_emphasis = 1; 1713 1714 /* 1715 * default to assert utmi_sleep_n and use maximum allowed HIRD 1716 * threshold value of 0b1100 1717 */ 1718 hird_threshold = 12; 1719 1720 /* 1721 * default to a TXFIFO size large enough to fit 6 max packets. This 1722 * allows for systems with larger bus latencies to have some headroom 1723 * for endpoints that have a large bMaxBurst value. 1724 */ 1725 tx_fifo_resize_max_num = 6; 1726 1727 /* default to a single XHCI interrupter */ 1728 num_hc_interrupters = 1; 1729 1730 dwc->maximum_speed = usb_get_maximum_speed(dev); 1731 dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev); 1732 dwc->dr_mode = usb_get_dr_mode(dev); 1733 dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); 1734 1735 dwc->sysdev_is_parent = device_property_read_bool(dev, 1736 "linux,sysdev_is_parent"); 1737 if (dwc->sysdev_is_parent) 1738 dwc->sysdev = dwc->dev->parent; 1739 else 1740 dwc->sysdev = dwc->dev; 1741 1742 dwc->sys_wakeup = device_may_wakeup(dwc->sysdev); 1743 1744 dwc->has_lpm_erratum = device_property_read_bool(dev, 1745 "snps,has-lpm-erratum"); 1746 device_property_read_u8(dev, "snps,lpm-nyet-threshold", 1747 &lpm_nyet_threshold); 1748 dwc->is_utmi_l1_suspend = device_property_read_bool(dev, 1749 "snps,is-utmi-l1-suspend"); 1750 device_property_read_u8(dev, "snps,hird-threshold", 1751 &hird_threshold); 1752 dwc->dis_start_transfer_quirk = device_property_read_bool(dev, 1753 "snps,dis-start-transfer-quirk"); 1754 dwc->usb3_lpm_capable = device_property_read_bool(dev, 1755 "snps,usb3_lpm_capable"); 1756 dwc->usb2_lpm_disable = device_property_read_bool(dev, 1757 "snps,usb2-lpm-disable"); 1758 dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev, 1759 "snps,usb2-gadget-lpm-disable"); 1760 device_property_read_u8(dev, "snps,rx-thr-num-pkt", 1761 &rx_thr_num_pkt); 1762 device_property_read_u8(dev, "snps,rx-max-burst", 1763 &rx_max_burst); 1764 device_property_read_u8(dev, "snps,tx-thr-num-pkt", 1765 &tx_thr_num_pkt); 1766 device_property_read_u8(dev, "snps,tx-max-burst", 1767 &tx_max_burst); 1768 device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd", 1769 &rx_thr_num_pkt_prd); 1770 device_property_read_u8(dev, "snps,rx-max-burst-prd", 1771 &rx_max_burst_prd); 1772 device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd", 1773 &tx_thr_num_pkt_prd); 1774 device_property_read_u8(dev, "snps,tx-max-burst-prd", 1775 &tx_max_burst_prd); 1776 device_property_read_u16(dev, "num-hc-interrupters", 1777 &num_hc_interrupters); 1778 /* DWC3 core allowed to have a max of 8 interrupters */ 1779 if (num_hc_interrupters > 8) 1780 num_hc_interrupters = 8; 1781 1782 dwc->do_fifo_resize = device_property_read_bool(dev, 1783 "tx-fifo-resize"); 1784 if (dwc->do_fifo_resize) 1785 device_property_read_u8(dev, "tx-fifo-max-num", 1786 &tx_fifo_resize_max_num); 1787 1788 dwc->disable_scramble_quirk = device_property_read_bool(dev, 1789 "snps,disable_scramble_quirk"); 1790 dwc->u2exit_lfps_quirk = device_property_read_bool(dev, 1791 "snps,u2exit_lfps_quirk"); 1792 dwc->u2ss_inp3_quirk = device_property_read_bool(dev, 1793 "snps,u2ss_inp3_quirk"); 1794 dwc->req_p1p2p3_quirk = device_property_read_bool(dev, 1795 "snps,req_p1p2p3_quirk"); 1796 dwc->del_p1p2p3_quirk = device_property_read_bool(dev, 1797 "snps,del_p1p2p3_quirk"); 1798 dwc->del_phy_power_chg_quirk = device_property_read_bool(dev, 1799 "snps,del_phy_power_chg_quirk"); 1800 dwc->lfps_filter_quirk = device_property_read_bool(dev, 1801 "snps,lfps_filter_quirk"); 1802 dwc->rx_detect_poll_quirk = device_property_read_bool(dev, 1803 "snps,rx_detect_poll_quirk"); 1804 dwc->dis_u3_susphy_quirk = device_property_read_bool(dev, 1805 "snps,dis_u3_susphy_quirk"); 1806 dwc->dis_u2_susphy_quirk = device_property_read_bool(dev, 1807 "snps,dis_u2_susphy_quirk"); 1808 dwc->dis_enblslpm_quirk = device_property_read_bool(dev, 1809 "snps,dis_enblslpm_quirk"); 1810 dwc->dis_u1_entry_quirk = device_property_read_bool(dev, 1811 "snps,dis-u1-entry-quirk"); 1812 dwc->dis_u2_entry_quirk = device_property_read_bool(dev, 1813 "snps,dis-u2-entry-quirk"); 1814 dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev, 1815 "snps,dis_rxdet_inp3_quirk"); 1816 dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev, 1817 "snps,dis-u2-freeclk-exists-quirk"); 1818 dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev, 1819 "snps,dis-del-phy-power-chg-quirk"); 1820 dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, 1821 "snps,dis-tx-ipgap-linecheck-quirk"); 1822 dwc->resume_hs_terminations = device_property_read_bool(dev, 1823 "snps,resume-hs-terminations"); 1824 dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev, 1825 "snps,ulpi-ext-vbus-drv"); 1826 dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, 1827 "snps,parkmode-disable-ss-quirk"); 1828 dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev, 1829 "snps,parkmode-disable-hs-quirk"); 1830 dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev, 1831 "snps,gfladj-refclk-lpm-sel-quirk"); 1832 1833 dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, 1834 "snps,tx_de_emphasis_quirk"); 1835 device_property_read_u8(dev, "snps,tx_de_emphasis", 1836 &tx_de_emphasis); 1837 device_property_read_string(dev, "snps,hsphy_interface", 1838 &dwc->hsphy_interface); 1839 device_property_read_u32(dev, "snps,quirk-frame-length-adjustment", 1840 &dwc->fladj); 1841 device_property_read_u32(dev, "snps,ref-clock-period-ns", 1842 &dwc->ref_clk_per); 1843 1844 dwc->dis_metastability_quirk = device_property_read_bool(dev, 1845 "snps,dis_metastability_quirk"); 1846 1847 dwc->dis_split_quirk = device_property_read_bool(dev, 1848 "snps,dis-split-quirk"); 1849 1850 dwc->lpm_nyet_threshold = lpm_nyet_threshold; 1851 dwc->tx_de_emphasis = tx_de_emphasis; 1852 1853 dwc->hird_threshold = hird_threshold; 1854 1855 dwc->rx_thr_num_pkt = rx_thr_num_pkt; 1856 dwc->rx_max_burst = rx_max_burst; 1857 1858 dwc->tx_thr_num_pkt = tx_thr_num_pkt; 1859 dwc->tx_max_burst = tx_max_burst; 1860 1861 dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd; 1862 dwc->rx_max_burst_prd = rx_max_burst_prd; 1863 1864 dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd; 1865 dwc->tx_max_burst_prd = tx_max_burst_prd; 1866 1867 dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num; 1868 1869 dwc->num_hc_interrupters = num_hc_interrupters; 1870 } 1871 1872 /* check whether the core supports IMOD */ 1873 bool dwc3_has_imod(struct dwc3 *dwc) 1874 { 1875 return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) || 1876 DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) || 1877 DWC3_IP_IS(DWC32); 1878 } 1879 1880 static void dwc3_check_params(struct dwc3 *dwc) 1881 { 1882 struct device *dev = dwc->dev; 1883 unsigned int hwparam_gen = 1884 DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); 1885 1886 /* 1887 * Enable IMOD for all supporting controllers. 1888 * 1889 * Particularly, DWC_usb3 v3.00a must enable this feature for 1890 * the following reason: 1891 * 1892 * Workaround for STAR 9000961433 which affects only version 1893 * 3.00a of the DWC_usb3 core. This prevents the controller 1894 * interrupt from being masked while handling events. IMOD 1895 * allows us to work around this issue. Enable it for the 1896 * affected version. 1897 */ 1898 if (dwc3_has_imod((dwc))) 1899 dwc->imod_interval = 1; 1900 1901 /* Check the maximum_speed parameter */ 1902 switch (dwc->maximum_speed) { 1903 case USB_SPEED_FULL: 1904 case USB_SPEED_HIGH: 1905 break; 1906 case USB_SPEED_SUPER: 1907 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) 1908 dev_warn(dev, "UDC doesn't support Gen 1\n"); 1909 break; 1910 case USB_SPEED_SUPER_PLUS: 1911 if ((DWC3_IP_IS(DWC32) && 1912 hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) || 1913 (!DWC3_IP_IS(DWC32) && 1914 hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) 1915 dev_warn(dev, "UDC doesn't support SSP\n"); 1916 break; 1917 default: 1918 dev_err(dev, "invalid maximum_speed parameter %d\n", 1919 dwc->maximum_speed); 1920 fallthrough; 1921 case USB_SPEED_UNKNOWN: 1922 switch (hwparam_gen) { 1923 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: 1924 dwc->maximum_speed = USB_SPEED_SUPER_PLUS; 1925 break; 1926 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1: 1927 if (DWC3_IP_IS(DWC32)) 1928 dwc->maximum_speed = USB_SPEED_SUPER_PLUS; 1929 else 1930 dwc->maximum_speed = USB_SPEED_SUPER; 1931 break; 1932 case DWC3_GHWPARAMS3_SSPHY_IFC_DIS: 1933 dwc->maximum_speed = USB_SPEED_HIGH; 1934 break; 1935 default: 1936 dwc->maximum_speed = USB_SPEED_SUPER; 1937 break; 1938 } 1939 break; 1940 } 1941 1942 /* 1943 * Currently the controller does not have visibility into the HW 1944 * parameter to determine the maximum number of lanes the HW supports. 1945 * If the number of lanes is not specified in the device property, then 1946 * set the default to support dual-lane for DWC_usb32 and single-lane 1947 * for DWC_usb31 for super-speed-plus. 1948 */ 1949 if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) { 1950 switch (dwc->max_ssp_rate) { 1951 case USB_SSP_GEN_2x1: 1952 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1) 1953 dev_warn(dev, "UDC only supports Gen 1\n"); 1954 break; 1955 case USB_SSP_GEN_1x2: 1956 case USB_SSP_GEN_2x2: 1957 if (DWC3_IP_IS(DWC31)) 1958 dev_warn(dev, "UDC only supports single lane\n"); 1959 break; 1960 case USB_SSP_GEN_UNKNOWN: 1961 default: 1962 switch (hwparam_gen) { 1963 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: 1964 if (DWC3_IP_IS(DWC32)) 1965 dwc->max_ssp_rate = USB_SSP_GEN_2x2; 1966 else 1967 dwc->max_ssp_rate = USB_SSP_GEN_2x1; 1968 break; 1969 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1: 1970 if (DWC3_IP_IS(DWC32)) 1971 dwc->max_ssp_rate = USB_SSP_GEN_1x2; 1972 break; 1973 } 1974 break; 1975 } 1976 } 1977 } 1978 1979 static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) 1980 { 1981 struct device *dev = dwc->dev; 1982 struct device_node *np_phy; 1983 struct extcon_dev *edev = NULL; 1984 const char *name; 1985 1986 if (device_property_present(dev, "extcon")) 1987 return extcon_get_edev_by_phandle(dev, 0); 1988 1989 /* 1990 * Device tree platforms should get extcon via phandle. 1991 * On ACPI platforms, we get the name from a device property. 1992 * This device property is for kernel internal use only and 1993 * is expected to be set by the glue code. 1994 */ 1995 if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) 1996 return extcon_get_extcon_dev(name); 1997 1998 /* 1999 * Check explicitly if "usb-role-switch" is used since 2000 * extcon_find_edev_by_node() can not be used to check the absence of 2001 * an extcon device. In the absence of an device it will always return 2002 * EPROBE_DEFER. 2003 */ 2004 if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) && 2005 device_property_read_bool(dev, "usb-role-switch")) 2006 return NULL; 2007 2008 /* 2009 * Try to get an extcon device from the USB PHY controller's "port" 2010 * node. Check if it has the "port" node first, to avoid printing the 2011 * error message from underlying code, as it's a valid case: extcon 2012 * device (and "port" node) may be missing in case of "usb-role-switch" 2013 * or OTG mode. 2014 */ 2015 np_phy = of_parse_phandle(dev->of_node, "phys", 0); 2016 if (of_graph_is_present(np_phy)) { 2017 struct device_node *np_conn; 2018 2019 np_conn = of_graph_get_remote_node(np_phy, -1, -1); 2020 if (np_conn) 2021 edev = extcon_find_edev_by_node(np_conn); 2022 of_node_put(np_conn); 2023 } 2024 of_node_put(np_phy); 2025 2026 return edev; 2027 } 2028 2029 static int dwc3_get_clocks(struct dwc3 *dwc) 2030 { 2031 struct device *dev = dwc->dev; 2032 2033 if (!dev->of_node) 2034 return 0; 2035 2036 /* 2037 * Clocks are optional, but new DT platforms should support all clocks 2038 * as required by the DT-binding. 2039 * Some devices have different clock names in legacy device trees, 2040 * check for them to retain backwards compatibility. 2041 */ 2042 dwc->bus_clk = devm_clk_get_optional(dev, "bus_early"); 2043 if (IS_ERR(dwc->bus_clk)) { 2044 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk), 2045 "could not get bus clock\n"); 2046 } 2047 2048 if (dwc->bus_clk == NULL) { 2049 dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk"); 2050 if (IS_ERR(dwc->bus_clk)) { 2051 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk), 2052 "could not get bus clock\n"); 2053 } 2054 } 2055 2056 dwc->ref_clk = devm_clk_get_optional(dev, "ref"); 2057 if (IS_ERR(dwc->ref_clk)) { 2058 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk), 2059 "could not get ref clock\n"); 2060 } 2061 2062 if (dwc->ref_clk == NULL) { 2063 dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk"); 2064 if (IS_ERR(dwc->ref_clk)) { 2065 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk), 2066 "could not get ref clock\n"); 2067 } 2068 } 2069 2070 dwc->susp_clk = devm_clk_get_optional(dev, "suspend"); 2071 if (IS_ERR(dwc->susp_clk)) { 2072 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk), 2073 "could not get suspend clock\n"); 2074 } 2075 2076 if (dwc->susp_clk == NULL) { 2077 dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk"); 2078 if (IS_ERR(dwc->susp_clk)) { 2079 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk), 2080 "could not get suspend clock\n"); 2081 } 2082 } 2083 2084 /* specific to Rockchip RK3588 */ 2085 dwc->utmi_clk = devm_clk_get_optional(dev, "utmi"); 2086 if (IS_ERR(dwc->utmi_clk)) { 2087 return dev_err_probe(dev, PTR_ERR(dwc->utmi_clk), 2088 "could not get utmi clock\n"); 2089 } 2090 2091 /* specific to Rockchip RK3588 */ 2092 dwc->pipe_clk = devm_clk_get_optional(dev, "pipe"); 2093 if (IS_ERR(dwc->pipe_clk)) { 2094 return dev_err_probe(dev, PTR_ERR(dwc->pipe_clk), 2095 "could not get pipe clock\n"); 2096 } 2097 2098 return 0; 2099 } 2100 2101 static int dwc3_get_num_ports(struct dwc3 *dwc) 2102 { 2103 void __iomem *base; 2104 u8 major_revision; 2105 u32 offset; 2106 u32 val; 2107 2108 /* 2109 * Remap xHCI address space to access XHCI ext cap regs since it is 2110 * needed to get information on number of ports present. 2111 */ 2112 base = ioremap(dwc->xhci_resources[0].start, 2113 resource_size(&dwc->xhci_resources[0])); 2114 if (!base) 2115 return -ENOMEM; 2116 2117 offset = 0; 2118 do { 2119 offset = xhci_find_next_ext_cap(base, offset, 2120 XHCI_EXT_CAPS_PROTOCOL); 2121 if (!offset) 2122 break; 2123 2124 val = readl(base + offset); 2125 major_revision = XHCI_EXT_PORT_MAJOR(val); 2126 2127 val = readl(base + offset + 0x08); 2128 if (major_revision == 0x03) { 2129 dwc->num_usb3_ports += XHCI_EXT_PORT_COUNT(val); 2130 } else if (major_revision <= 0x02) { 2131 dwc->num_usb2_ports += XHCI_EXT_PORT_COUNT(val); 2132 } else { 2133 dev_warn(dwc->dev, "unrecognized port major revision %d\n", 2134 major_revision); 2135 } 2136 } while (1); 2137 2138 dev_dbg(dwc->dev, "hs-ports: %u ss-ports: %u\n", 2139 dwc->num_usb2_ports, dwc->num_usb3_ports); 2140 2141 iounmap(base); 2142 2143 if (dwc->num_usb2_ports > DWC3_USB2_MAX_PORTS || 2144 dwc->num_usb3_ports > DWC3_USB3_MAX_PORTS) 2145 return -EINVAL; 2146 2147 return 0; 2148 } 2149 2150 static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc) 2151 { 2152 struct power_supply *usb_psy; 2153 const char *usb_psy_name; 2154 int ret; 2155 2156 ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name); 2157 if (ret < 0) 2158 return NULL; 2159 2160 usb_psy = power_supply_get_by_name(usb_psy_name); 2161 if (!usb_psy) 2162 return ERR_PTR(-EPROBE_DEFER); 2163 2164 return usb_psy; 2165 } 2166 2167 int dwc3_core_probe(const struct dwc3_probe_data *data) 2168 { 2169 struct dwc3 *dwc = data->dwc; 2170 struct device *dev = dwc->dev; 2171 struct resource dwc_res; 2172 unsigned int hw_mode; 2173 void __iomem *regs; 2174 struct resource *res = data->res; 2175 int ret; 2176 2177 dwc->xhci_resources[0].start = res->start; 2178 dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + 2179 DWC3_XHCI_REGS_END; 2180 dwc->xhci_resources[0].flags = res->flags; 2181 dwc->xhci_resources[0].name = res->name; 2182 2183 /* 2184 * Request memory region but exclude xHCI regs, 2185 * since it will be requested by the xhci-plat driver. 2186 */ 2187 dwc_res = *res; 2188 dwc_res.start += DWC3_GLOBALS_REGS_START; 2189 2190 if (dev->of_node) { 2191 struct device_node *parent = of_get_parent(dev->of_node); 2192 2193 if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) { 2194 dwc_res.start -= DWC3_GLOBALS_REGS_START; 2195 dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START; 2196 } 2197 2198 of_node_put(parent); 2199 } 2200 2201 regs = devm_ioremap_resource(dev, &dwc_res); 2202 if (IS_ERR(regs)) 2203 return PTR_ERR(regs); 2204 2205 dwc->regs = regs; 2206 dwc->regs_size = resource_size(&dwc_res); 2207 2208 dwc3_get_properties(dwc); 2209 2210 dwc3_get_software_properties(dwc); 2211 2212 dwc->usb_psy = dwc3_get_usb_power_supply(dwc); 2213 if (IS_ERR(dwc->usb_psy)) 2214 return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n"); 2215 2216 if (!data->ignore_clocks_and_resets) { 2217 dwc->reset = devm_reset_control_array_get_optional_shared(dev); 2218 if (IS_ERR(dwc->reset)) { 2219 ret = PTR_ERR(dwc->reset); 2220 goto err_put_psy; 2221 } 2222 2223 ret = dwc3_get_clocks(dwc); 2224 if (ret) 2225 goto err_put_psy; 2226 } 2227 2228 ret = reset_control_deassert(dwc->reset); 2229 if (ret) 2230 goto err_put_psy; 2231 2232 ret = dwc3_clk_enable(dwc); 2233 if (ret) 2234 goto err_assert_reset; 2235 2236 if (!dwc3_core_is_valid(dwc)) { 2237 dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); 2238 ret = -ENODEV; 2239 goto err_disable_clks; 2240 } 2241 2242 dev_set_drvdata(dev, dwc); 2243 dwc3_cache_hwparams(dwc); 2244 2245 if (!dev_is_pci(dwc->sysdev) && 2246 DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) { 2247 ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); 2248 if (ret) 2249 goto err_disable_clks; 2250 } 2251 2252 /* 2253 * Currently only DWC3 controllers that are host-only capable 2254 * can have more than one port. 2255 */ 2256 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 2257 if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) { 2258 ret = dwc3_get_num_ports(dwc); 2259 if (ret) 2260 goto err_disable_clks; 2261 } else { 2262 dwc->num_usb2_ports = 1; 2263 dwc->num_usb3_ports = 1; 2264 } 2265 2266 spin_lock_init(&dwc->lock); 2267 mutex_init(&dwc->mutex); 2268 2269 pm_runtime_get_noresume(dev); 2270 pm_runtime_set_active(dev); 2271 pm_runtime_use_autosuspend(dev); 2272 pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY); 2273 pm_runtime_enable(dev); 2274 2275 pm_runtime_forbid(dev); 2276 2277 ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE); 2278 if (ret) { 2279 dev_err(dwc->dev, "failed to allocate event buffers\n"); 2280 ret = -ENOMEM; 2281 goto err_allow_rpm; 2282 } 2283 2284 dwc->edev = dwc3_get_extcon(dwc); 2285 if (IS_ERR(dwc->edev)) { 2286 ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n"); 2287 goto err_free_event_buffers; 2288 } 2289 2290 ret = dwc3_get_dr_mode(dwc); 2291 if (ret) 2292 goto err_free_event_buffers; 2293 2294 ret = dwc3_core_init(dwc); 2295 if (ret) { 2296 dev_err_probe(dev, ret, "failed to initialize core\n"); 2297 goto err_free_event_buffers; 2298 } 2299 2300 dwc3_check_params(dwc); 2301 dwc3_debugfs_init(dwc); 2302 2303 ret = dwc3_core_init_mode(dwc); 2304 if (ret) 2305 goto err_exit_debugfs; 2306 2307 pm_runtime_put(dev); 2308 2309 dma_set_max_seg_size(dev, UINT_MAX); 2310 2311 return 0; 2312 2313 err_exit_debugfs: 2314 dwc3_debugfs_exit(dwc); 2315 dwc3_event_buffers_cleanup(dwc); 2316 dwc3_phy_power_off(dwc); 2317 dwc3_phy_exit(dwc); 2318 dwc3_ulpi_exit(dwc); 2319 err_free_event_buffers: 2320 dwc3_free_event_buffers(dwc); 2321 err_allow_rpm: 2322 pm_runtime_allow(dev); 2323 pm_runtime_disable(dev); 2324 pm_runtime_dont_use_autosuspend(dev); 2325 pm_runtime_set_suspended(dev); 2326 pm_runtime_put_noidle(dev); 2327 err_disable_clks: 2328 dwc3_clk_disable(dwc); 2329 err_assert_reset: 2330 reset_control_assert(dwc->reset); 2331 err_put_psy: 2332 if (dwc->usb_psy) 2333 power_supply_put(dwc->usb_psy); 2334 2335 return ret; 2336 } 2337 EXPORT_SYMBOL_GPL(dwc3_core_probe); 2338 2339 static int dwc3_probe(struct platform_device *pdev) 2340 { 2341 struct dwc3_probe_data probe_data = {}; 2342 struct resource *res; 2343 struct dwc3 *dwc; 2344 2345 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2346 if (!res) { 2347 dev_err(&pdev->dev, "missing memory resource\n"); 2348 return -ENODEV; 2349 } 2350 2351 dwc = devm_kzalloc(&pdev->dev, sizeof(*dwc), GFP_KERNEL); 2352 if (!dwc) 2353 return -ENOMEM; 2354 2355 dwc->dev = &pdev->dev; 2356 dwc->glue_ops = NULL; 2357 2358 probe_data.dwc = dwc; 2359 probe_data.res = res; 2360 2361 return dwc3_core_probe(&probe_data); 2362 } 2363 2364 void dwc3_core_remove(struct dwc3 *dwc) 2365 { 2366 pm_runtime_get_sync(dwc->dev); 2367 2368 dwc3_core_exit_mode(dwc); 2369 dwc3_debugfs_exit(dwc); 2370 2371 dwc3_core_exit(dwc); 2372 dwc3_ulpi_exit(dwc); 2373 2374 pm_runtime_allow(dwc->dev); 2375 pm_runtime_disable(dwc->dev); 2376 pm_runtime_dont_use_autosuspend(dwc->dev); 2377 pm_runtime_put_noidle(dwc->dev); 2378 /* 2379 * HACK: Clear the driver data, which is currently accessed by parent 2380 * glue drivers, before allowing the parent to suspend. 2381 */ 2382 dev_set_drvdata(dwc->dev, NULL); 2383 pm_runtime_set_suspended(dwc->dev); 2384 2385 dwc3_free_event_buffers(dwc); 2386 2387 if (dwc->usb_psy) 2388 power_supply_put(dwc->usb_psy); 2389 } 2390 EXPORT_SYMBOL_GPL(dwc3_core_remove); 2391 2392 static void dwc3_remove(struct platform_device *pdev) 2393 { 2394 dwc3_core_remove(platform_get_drvdata(pdev)); 2395 } 2396 2397 #ifdef CONFIG_PM 2398 static int dwc3_core_init_for_resume(struct dwc3 *dwc) 2399 { 2400 int ret; 2401 2402 ret = reset_control_deassert(dwc->reset); 2403 if (ret) 2404 return ret; 2405 2406 ret = dwc3_clk_enable(dwc); 2407 if (ret) 2408 goto assert_reset; 2409 2410 ret = dwc3_core_init(dwc); 2411 if (ret) 2412 goto disable_clks; 2413 2414 return 0; 2415 2416 disable_clks: 2417 dwc3_clk_disable(dwc); 2418 assert_reset: 2419 reset_control_assert(dwc->reset); 2420 2421 return ret; 2422 } 2423 2424 static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) 2425 { 2426 u32 reg; 2427 int i; 2428 int ret; 2429 2430 if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) { 2431 dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) & 2432 DWC3_GUSB2PHYCFG_SUSPHY) || 2433 (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) & 2434 DWC3_GUSB3PIPECTL_SUSPHY); 2435 /* 2436 * TI AM62 platform requires SUSPHY to be 2437 * enabled for system suspend to work. 2438 */ 2439 if (!dwc->susphy_state) 2440 dwc3_enable_susphy(dwc, true); 2441 } 2442 2443 switch (dwc->current_dr_role) { 2444 case DWC3_GCTL_PRTCAP_DEVICE: 2445 if (pm_runtime_suspended(dwc->dev)) 2446 break; 2447 ret = dwc3_gadget_suspend(dwc); 2448 if (ret) 2449 return ret; 2450 synchronize_irq(dwc->irq_gadget); 2451 dwc3_core_exit(dwc); 2452 break; 2453 case DWC3_GCTL_PRTCAP_HOST: 2454 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) { 2455 dwc3_core_exit(dwc); 2456 break; 2457 } 2458 2459 /* Let controller to suspend HSPHY before PHY driver suspends */ 2460 if (dwc->dis_u2_susphy_quirk || 2461 dwc->dis_enblslpm_quirk) { 2462 for (i = 0; i < dwc->num_usb2_ports; i++) { 2463 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i)); 2464 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM | 2465 DWC3_GUSB2PHYCFG_SUSPHY; 2466 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg); 2467 } 2468 2469 /* Give some time for USB2 PHY to suspend */ 2470 usleep_range(5000, 6000); 2471 } 2472 2473 for (i = 0; i < dwc->num_usb2_ports; i++) 2474 phy_pm_runtime_put_sync(dwc->usb2_generic_phy[i]); 2475 for (i = 0; i < dwc->num_usb3_ports; i++) 2476 phy_pm_runtime_put_sync(dwc->usb3_generic_phy[i]); 2477 break; 2478 case DWC3_GCTL_PRTCAP_OTG: 2479 /* do nothing during runtime_suspend */ 2480 if (PMSG_IS_AUTO(msg)) 2481 break; 2482 2483 if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { 2484 ret = dwc3_gadget_suspend(dwc); 2485 if (ret) 2486 return ret; 2487 synchronize_irq(dwc->irq_gadget); 2488 } 2489 2490 dwc3_otg_exit(dwc); 2491 dwc3_core_exit(dwc); 2492 break; 2493 default: 2494 /* do nothing */ 2495 break; 2496 } 2497 2498 return 0; 2499 } 2500 2501 static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) 2502 { 2503 int ret; 2504 u32 reg; 2505 int i; 2506 2507 switch (dwc->current_dr_role) { 2508 case DWC3_GCTL_PRTCAP_DEVICE: 2509 ret = dwc3_core_init_for_resume(dwc); 2510 if (ret) 2511 return ret; 2512 2513 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); 2514 dwc3_gadget_resume(dwc); 2515 break; 2516 case DWC3_GCTL_PRTCAP_HOST: 2517 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) { 2518 ret = dwc3_core_init_for_resume(dwc); 2519 if (ret) 2520 return ret; 2521 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true); 2522 break; 2523 } 2524 /* Restore GUSB2PHYCFG bits that were modified in suspend */ 2525 for (i = 0; i < dwc->num_usb2_ports; i++) { 2526 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i)); 2527 if (dwc->dis_u2_susphy_quirk) 2528 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 2529 2530 if (dwc->dis_enblslpm_quirk) 2531 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; 2532 2533 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg); 2534 } 2535 2536 for (i = 0; i < dwc->num_usb2_ports; i++) 2537 phy_pm_runtime_get_sync(dwc->usb2_generic_phy[i]); 2538 for (i = 0; i < dwc->num_usb3_ports; i++) 2539 phy_pm_runtime_get_sync(dwc->usb3_generic_phy[i]); 2540 break; 2541 case DWC3_GCTL_PRTCAP_OTG: 2542 /* nothing to do on runtime_resume */ 2543 if (PMSG_IS_AUTO(msg)) 2544 break; 2545 2546 ret = dwc3_core_init_for_resume(dwc); 2547 if (ret) 2548 return ret; 2549 2550 dwc3_set_prtcap(dwc, dwc->current_dr_role, true); 2551 2552 dwc3_otg_init(dwc); 2553 if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) { 2554 dwc3_otg_host_init(dwc); 2555 } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { 2556 dwc3_gadget_resume(dwc); 2557 } 2558 2559 break; 2560 default: 2561 /* do nothing */ 2562 break; 2563 } 2564 2565 if (!PMSG_IS_AUTO(msg)) { 2566 /* restore SUSPHY state to that before system suspend. */ 2567 dwc3_enable_susphy(dwc, dwc->susphy_state); 2568 } 2569 2570 return 0; 2571 } 2572 2573 static int dwc3_runtime_checks(struct dwc3 *dwc) 2574 { 2575 switch (dwc->current_dr_role) { 2576 case DWC3_GCTL_PRTCAP_DEVICE: 2577 if (dwc->connected) 2578 return -EBUSY; 2579 break; 2580 case DWC3_GCTL_PRTCAP_HOST: 2581 default: 2582 /* do nothing */ 2583 break; 2584 } 2585 2586 return 0; 2587 } 2588 2589 int dwc3_runtime_suspend(struct dwc3 *dwc) 2590 { 2591 int ret; 2592 2593 if (dwc3_runtime_checks(dwc)) 2594 return -EBUSY; 2595 2596 ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND); 2597 if (ret) 2598 return ret; 2599 2600 return 0; 2601 } 2602 EXPORT_SYMBOL_GPL(dwc3_runtime_suspend); 2603 2604 int dwc3_runtime_resume(struct dwc3 *dwc) 2605 { 2606 struct device *dev = dwc->dev; 2607 int ret; 2608 2609 ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME); 2610 if (ret) 2611 return ret; 2612 2613 switch (dwc->current_dr_role) { 2614 case DWC3_GCTL_PRTCAP_DEVICE: 2615 if (dwc->pending_events) { 2616 pm_runtime_put(dev); 2617 dwc->pending_events = false; 2618 enable_irq(dwc->irq_gadget); 2619 } 2620 break; 2621 case DWC3_GCTL_PRTCAP_HOST: 2622 default: 2623 /* do nothing */ 2624 break; 2625 } 2626 2627 pm_runtime_mark_last_busy(dev); 2628 2629 return 0; 2630 } 2631 EXPORT_SYMBOL_GPL(dwc3_runtime_resume); 2632 2633 int dwc3_runtime_idle(struct dwc3 *dwc) 2634 { 2635 struct device *dev = dwc->dev; 2636 2637 switch (dwc->current_dr_role) { 2638 case DWC3_GCTL_PRTCAP_DEVICE: 2639 if (dwc3_runtime_checks(dwc)) 2640 return -EBUSY; 2641 break; 2642 case DWC3_GCTL_PRTCAP_HOST: 2643 default: 2644 /* do nothing */ 2645 break; 2646 } 2647 2648 pm_runtime_mark_last_busy(dev); 2649 pm_runtime_autosuspend(dev); 2650 2651 return 0; 2652 } 2653 EXPORT_SYMBOL_GPL(dwc3_runtime_idle); 2654 2655 static int dwc3_plat_runtime_suspend(struct device *dev) 2656 { 2657 return dwc3_runtime_suspend(dev_get_drvdata(dev)); 2658 } 2659 2660 static int dwc3_plat_runtime_resume(struct device *dev) 2661 { 2662 return dwc3_runtime_resume(dev_get_drvdata(dev)); 2663 } 2664 2665 static int dwc3_plat_runtime_idle(struct device *dev) 2666 { 2667 return dwc3_runtime_idle(dev_get_drvdata(dev)); 2668 } 2669 #endif /* CONFIG_PM */ 2670 2671 #ifdef CONFIG_PM_SLEEP 2672 int dwc3_pm_suspend(struct dwc3 *dwc) 2673 { 2674 struct device *dev = dwc->dev; 2675 int ret; 2676 2677 ret = dwc3_suspend_common(dwc, PMSG_SUSPEND); 2678 if (ret) 2679 return ret; 2680 2681 pinctrl_pm_select_sleep_state(dev); 2682 2683 return 0; 2684 } 2685 EXPORT_SYMBOL_GPL(dwc3_pm_suspend); 2686 2687 int dwc3_pm_resume(struct dwc3 *dwc) 2688 { 2689 struct device *dev = dwc->dev; 2690 int ret = 0; 2691 2692 pinctrl_pm_select_default_state(dev); 2693 2694 pm_runtime_disable(dev); 2695 ret = pm_runtime_set_active(dev); 2696 if (ret) 2697 goto out; 2698 2699 ret = dwc3_resume_common(dwc, PMSG_RESUME); 2700 if (ret) 2701 pm_runtime_set_suspended(dev); 2702 2703 out: 2704 pm_runtime_enable(dev); 2705 2706 return ret; 2707 } 2708 EXPORT_SYMBOL_GPL(dwc3_pm_resume); 2709 2710 void dwc3_pm_complete(struct dwc3 *dwc) 2711 { 2712 u32 reg; 2713 2714 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST && 2715 dwc->dis_split_quirk) { 2716 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); 2717 reg |= DWC3_GUCTL3_SPLITDISABLE; 2718 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); 2719 } 2720 } 2721 EXPORT_SYMBOL_GPL(dwc3_pm_complete); 2722 2723 int dwc3_pm_prepare(struct dwc3 *dwc) 2724 { 2725 struct device *dev = dwc->dev; 2726 2727 /* 2728 * Indicate to the PM core that it may safely leave the device in 2729 * runtime suspend if runtime-suspended already in device mode. 2730 */ 2731 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE && 2732 pm_runtime_suspended(dev) && 2733 !dev_pinctrl(dev)) 2734 return 1; 2735 2736 return 0; 2737 } 2738 EXPORT_SYMBOL_GPL(dwc3_pm_prepare); 2739 2740 static int dwc3_plat_suspend(struct device *dev) 2741 { 2742 return dwc3_pm_suspend(dev_get_drvdata(dev)); 2743 } 2744 2745 static int dwc3_plat_resume(struct device *dev) 2746 { 2747 return dwc3_pm_resume(dev_get_drvdata(dev)); 2748 } 2749 2750 static void dwc3_plat_complete(struct device *dev) 2751 { 2752 dwc3_pm_complete(dev_get_drvdata(dev)); 2753 } 2754 2755 static int dwc3_plat_prepare(struct device *dev) 2756 { 2757 return dwc3_pm_prepare(dev_get_drvdata(dev)); 2758 } 2759 #else 2760 #define dwc3_plat_complete NULL 2761 #define dwc3_plat_prepare NULL 2762 #endif /* CONFIG_PM_SLEEP */ 2763 2764 static const struct dev_pm_ops dwc3_dev_pm_ops = { 2765 SET_SYSTEM_SLEEP_PM_OPS(dwc3_plat_suspend, dwc3_plat_resume) 2766 .complete = dwc3_plat_complete, 2767 .prepare = dwc3_plat_prepare, 2768 /* 2769 * Runtime suspend halts the controller on disconnection. It relies on 2770 * platforms with custom connection notification to start the controller 2771 * again. 2772 */ 2773 SET_RUNTIME_PM_OPS(dwc3_plat_runtime_suspend, dwc3_plat_runtime_resume, 2774 dwc3_plat_runtime_idle) 2775 }; 2776 2777 #ifdef CONFIG_OF 2778 static const struct of_device_id of_dwc3_match[] = { 2779 { 2780 .compatible = "snps,dwc3" 2781 }, 2782 { 2783 .compatible = "synopsys,dwc3" 2784 }, 2785 { }, 2786 }; 2787 MODULE_DEVICE_TABLE(of, of_dwc3_match); 2788 #endif 2789 2790 #ifdef CONFIG_ACPI 2791 2792 #define ACPI_ID_INTEL_BSW "808622B7" 2793 2794 static const struct acpi_device_id dwc3_acpi_match[] = { 2795 { ACPI_ID_INTEL_BSW, 0 }, 2796 { }, 2797 }; 2798 MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match); 2799 #endif 2800 2801 static struct platform_driver dwc3_driver = { 2802 .probe = dwc3_probe, 2803 .remove = dwc3_remove, 2804 .driver = { 2805 .name = "dwc3", 2806 .of_match_table = of_match_ptr(of_dwc3_match), 2807 .acpi_match_table = ACPI_PTR(dwc3_acpi_match), 2808 .pm = &dwc3_dev_pm_ops, 2809 }, 2810 }; 2811 2812 module_platform_driver(dwc3_driver); 2813 2814 MODULE_ALIAS("platform:dwc3"); 2815 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); 2816 MODULE_LICENSE("GPL v2"); 2817 MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver"); 2818