// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - DesignWare USB3 DRD Controller Core file
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/clk.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/reset.h>
#include <linux/bitfield.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>

#include "core.h"
#include "gadget.h"
#include "glue.h"
#include "io.h"

#include "debug.h"
#include "../host/xhci-ext-caps.h"

#define DWC3_DEFAULT_AUTOSUSPEND_DELAY	5000 /* ms */

/**
 * dwc3_get_dr_mode - Validates and sets dr_mode
 * @dwc: pointer to our context structure
 */
static int dwc3_get_dr_mode(struct dwc3 *dwc)
{
	enum usb_dr_mode mode;
	struct device *dev = dwc->dev;
	unsigned int hw_mode;

	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	mode = dwc->dr_mode;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	switch (hw_mode) {
	case DWC3_GHWPARAMS0_MODE_GADGET:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
			dev_err(dev,
				"Controller does not support host mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_PERIPHERAL;
		break;
	case DWC3_GHWPARAMS0_MODE_HOST:
		if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
			dev_err(dev,
				"Controller does not support device mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_HOST;
		break;
	default:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
			mode = USB_DR_MODE_HOST;
		else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
			mode = USB_DR_MODE_PERIPHERAL;

		/*
		 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
		 * mode. If the controller supports DRD but the dr_mode is not
		 * specified or set to OTG, then set the mode to peripheral.
		 */
		if (mode == USB_DR_MODE_OTG && !dwc->edev &&
		    (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
		     !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
		    !DWC3_VER_IS_PRIOR(DWC3, 330A))
			mode = USB_DR_MODE_PERIPHERAL;
	}

	if (mode != dwc->dr_mode) {
		dev_warn(dev,
			 "Configuration mismatch. dr_mode forced to %s\n",
			 mode == USB_DR_MODE_HOST ? "host" : "gadget");
"host" : "gadget"); 103 104 dwc->dr_mode = mode; 105 } 106 107 return 0; 108 } 109 110 void dwc3_enable_susphy(struct dwc3 *dwc, bool enable) 111 { 112 u32 reg; 113 int i; 114 115 for (i = 0; i < dwc->num_usb3_ports; i++) { 116 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(i)); 117 if (enable && !dwc->dis_u3_susphy_quirk) 118 reg |= DWC3_GUSB3PIPECTL_SUSPHY; 119 else 120 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; 121 122 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(i), reg); 123 } 124 125 for (i = 0; i < dwc->num_usb2_ports; i++) { 126 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i)); 127 if (enable && !dwc->dis_u2_susphy_quirk) 128 reg |= DWC3_GUSB2PHYCFG_SUSPHY; 129 else 130 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 131 132 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg); 133 } 134 } 135 136 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy) 137 { 138 unsigned int hw_mode; 139 u32 reg; 140 141 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 142 143 /* 144 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and 145 * GUSB2PHYCFG.SUSPHY should be cleared during mode switching, 146 * and they can be set after core initialization. 147 */ 148 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 149 if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) { 150 if (DWC3_GCTL_PRTCAP(reg) != mode) 151 dwc3_enable_susphy(dwc, false); 152 } 153 154 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); 155 reg |= DWC3_GCTL_PRTCAPDIR(mode); 156 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 157 158 dwc->current_dr_role = mode; 159 trace_dwc3_set_prtcap(mode); 160 } 161 162 static void __dwc3_set_mode(struct work_struct *work) 163 { 164 struct dwc3 *dwc = work_to_dwc(work); 165 unsigned long flags; 166 int ret; 167 u32 reg; 168 u32 desired_dr_role; 169 int i; 170 171 mutex_lock(&dwc->mutex); 172 spin_lock_irqsave(&dwc->lock, flags); 173 desired_dr_role = dwc->desired_dr_role; 174 spin_unlock_irqrestore(&dwc->lock, flags); 175 176 pm_runtime_get_sync(dwc->dev); 177 178 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) 179 dwc3_otg_update(dwc, 0); 180 181 if (!desired_dr_role) 182 goto out; 183 184 if (desired_dr_role == dwc->current_dr_role) 185 goto out; 186 187 if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) 188 goto out; 189 190 switch (dwc->current_dr_role) { 191 case DWC3_GCTL_PRTCAP_HOST: 192 dwc3_host_exit(dwc); 193 break; 194 case DWC3_GCTL_PRTCAP_DEVICE: 195 dwc3_gadget_exit(dwc); 196 dwc3_event_buffers_cleanup(dwc); 197 break; 198 case DWC3_GCTL_PRTCAP_OTG: 199 dwc3_otg_exit(dwc); 200 spin_lock_irqsave(&dwc->lock, flags); 201 dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE; 202 spin_unlock_irqrestore(&dwc->lock, flags); 203 dwc3_otg_update(dwc, 1); 204 break; 205 default: 206 break; 207 } 208 209 /* 210 * When current_dr_role is not set, there's no role switching. 211 * Only perform GCTL.CoreSoftReset when there's DRD role switching. 212 */ 213 if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) || 214 DWC3_VER_IS_PRIOR(DWC31, 190A)) && 215 desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { 216 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 217 reg |= DWC3_GCTL_CORESOFTRESET; 218 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 219 220 /* 221 * Wait for internal clocks to synchronized. DWC_usb31 and 222 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To 223 * keep it consistent across different IPs, let's wait up to 224 * 100ms before clearing GCTL.CORESOFTRESET. 
		 */
		msleep(100);

		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
		reg &= ~DWC3_GCTL_CORESOFTRESET;
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_set_prtcap(dwc, desired_dr_role, false);

	spin_unlock_irqrestore(&dwc->lock, flags);

	switch (desired_dr_role) {
	case DWC3_GCTL_PRTCAP_HOST:
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dwc->dev, "failed to initialize host\n");
		} else {
			if (dwc->usb2_phy)
				otg_set_vbus(dwc->usb2_phy->otg, true);

			for (i = 0; i < dwc->num_usb2_ports; i++)
				phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
			for (i = 0; i < dwc->num_usb3_ports; i++)
				phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);

			if (dwc->dis_split_quirk) {
				reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
				reg |= DWC3_GUCTL3_SPLITDISABLE;
				dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
			}
		}
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_core_soft_reset(dwc);

		dwc3_event_buffers_setup(dwc);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			dev_err(dwc->dev, "failed to initialize peripheral\n");
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		dwc3_otg_init(dwc);
		dwc3_otg_update(dwc, 0);
		break;
	default:
		break;
	}

out:
	pm_runtime_mark_last_busy(dwc->dev);
	pm_runtime_put_autosuspend(dwc->dev);
	mutex_unlock(&dwc->mutex);
}

void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
	unsigned long flags;

	if (dwc->dr_mode != USB_DR_MODE_OTG)
		return;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->desired_dr_role = mode;
	spin_unlock_irqrestore(&dwc->lock, flags);

	queue_work(system_freezable_wq, &dwc->drd_work);
}

u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
			DWC3_GDBGFIFOSPACE_NUM(dep->number) |
			DWC3_GDBGFIFOSPACE_TYPE(type));

	reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);

	return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
}

/**
 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
 * @dwc: pointer to our context structure
 */
int dwc3_core_soft_reset(struct dwc3 *dwc)
{
	u32 reg;
	int retries = 1000;

	/*
	 * We're resetting only the device side because, if we're in host mode,
	 * the xHCI driver will reset the host block. If dwc3 was configured
	 * for host-only mode, then we can return early.
	 */
	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg |= DWC3_DCTL_CSFTRST;
	reg &= ~DWC3_DCTL_RUN_STOP;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	/*
	 * For DWC_usb31 controllers 1.90a and later, the DCTL.CSFTRST bit
	 * is cleared only after all the clocks are synchronized. This can
	 * take a little more than 50ms. Set the polling rate at 20ms
	 * for 10 times instead.
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
		retries = 10;

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		if (!(reg & DWC3_DCTL_CSFTRST))
			goto done;

		if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
			msleep(20);
		else
			udelay(1);
	} while (--retries);

	dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
	return -ETIMEDOUT;

done:
	/*
	 * For DWC_usb31 controllers 1.80a and prior, once the DCTL.CSFTRST bit
	 * is cleared, we must wait at least 50ms before accessing the PHY
	 * domain (synchronization delay).
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
		msleep(50);

	return 0;
}

/*
 * dwc3_frame_length_adjustment - Adjusts frame length if required
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
	u32 reg;
	u32 dft;

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	if (dwc->fladj == 0)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	dft = reg & DWC3_GFLADJ_30MHZ_MASK;
	if (dft != dwc->fladj) {
		reg &= ~DWC3_GFLADJ_30MHZ_MASK;
		reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
		dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
	}
}

/**
 * dwc3_ref_clk_period - Reference clock period configuration
 * @dwc: Pointer to our controller context structure
 *
 * The default reference clock period depends on the hardware
 * configuration. For systems with a reference clock that differs
 * from the default, this sets the clock period in the DWC3_GUCTL
 * register.
 */
static void dwc3_ref_clk_period(struct dwc3 *dwc)
{
	unsigned long period;
	unsigned long fladj;
	unsigned long decr;
	unsigned long rate;
	u32 reg;

	if (dwc->ref_clk) {
		rate = clk_get_rate(dwc->ref_clk);
		if (!rate)
			return;
		period = NSEC_PER_SEC / rate;
	} else if (dwc->ref_clk_per) {
		period = dwc->ref_clk_per;
		rate = NSEC_PER_SEC / period;
	} else {
		return;
	}

	reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
	reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
	reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
	dwc3_writel(dwc->regs, DWC3_GUCTL, reg);

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	/*
	 * The calculation below is
	 *
	 *     125000 * (NSEC_PER_SEC / (rate * period) - 1)
	 *
	 * but rearranged for fixed-point arithmetic. The division must be
	 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
	 * neither does rate * period).
	 *
	 * Note that rate * period ~= NSEC_PER_SEC, minus the number of
	 * nanoseconds of error caused by the truncation which happened during
	 * the division when calculating rate or period (whichever one was
	 * derived from the other). We first calculate the relative error, then
	 * scale it to units of 8 ppm.
	 */
	fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
	fladj -= 125000;

	/*
	 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
	 */
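	/*
	 * Worked example (illustrative only, assuming a 24 MHz reference
	 * clock): period = 1000000000 / 24000000 = 41 ns (truncated), so
	 * rate * period = 984000000 and
	 * fladj = 125000 * 1000000000 / 984000000 - 125000 ~= 2032.
	 * decr = 480000000 / 24000000 = 20, i.e. 240MHZDECR = 10 and
	 * 240MHZDECR_PLS1 = 0.
	 */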
	decr = 480000000 / rate;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
	     & ~DWC3_GFLADJ_240MHZDECR
	     & ~DWC3_GFLADJ_240MHZDECR_PLS1;
	reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
	     | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
	     | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);

	if (dwc->gfladj_refclk_lpm_sel)
		reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;

	dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}

/**
 * dwc3_free_one_event_buffer - Frees one event buffer
 * @dwc: Pointer to our controller context structure
 * @evt: Pointer to event buffer to be freed
 */
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt)
{
	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
}

/**
 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
 * @dwc: Pointer to our controller context structure
 * @length: size of the event buffer
 *
 * Returns a pointer to the allocated event buffer structure on success
 * otherwise ERR_PTR(errno).
 */
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
		unsigned int length)
{
	struct dwc3_event_buffer *evt;

	evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return ERR_PTR(-ENOMEM);

	evt->dwc = dwc;
	evt->length = length;
	evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
	if (!evt->cache)
		return ERR_PTR(-ENOMEM);

	evt->buf = dma_alloc_coherent(dwc->sysdev, length,
			&evt->dma, GFP_KERNEL);
	if (!evt->buf)
		return ERR_PTR(-ENOMEM);

	return evt;
}

/**
 * dwc3_free_event_buffers - frees all allocated event buffers
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_free_event_buffers(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;
	if (evt)
		dwc3_free_one_event_buffer(dwc, evt);
}

/**
 * dwc3_alloc_event_buffers - Allocates the event buffer of size @length
 * @dwc: pointer to our controller context structure
 * @length: size of event buffer
 *
 * Returns 0 on success otherwise negative errno. In the error case, dwc
 * may contain some buffers allocated but not all which were requested.
 */
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
{
	struct dwc3_event_buffer *evt;
	unsigned int hw_mode;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
		dwc->ev_buf = NULL;
		return 0;
	}

	evt = dwc3_alloc_one_event_buffer(dwc, length);
	if (IS_ERR(evt)) {
		dev_err(dwc->dev, "can't allocate event buffer\n");
		return PTR_ERR(evt);
	}
	dwc->ev_buf = evt;

	return 0;
}

/**
 * dwc3_event_buffers_setup - setup our allocated event buffers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;
	u32 reg;

	if (!dwc->ev_buf)
		return 0;

	evt = dwc->ev_buf;
	evt->lpos = 0;
	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
			lower_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
			upper_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));

	/* Clear any stale event */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
	return 0;
}

void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;
	u32 reg;

	if (!dwc->ev_buf)
		return;
	/*
	 * Exynos platforms may not be able to access the event buffer if the
	 * controller failed to halt in dwc3_core_exit().
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	if (!(reg & DWC3_DSTS_DEVCTRLHLT))
		return;

	evt = dwc->ev_buf;

	evt->lpos = 0;

	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
			| DWC3_GEVNTSIZ_SIZE(0));

	/* Clear any stale event */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
}

static void dwc3_core_num_eps(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	dwc->num_eps = DWC3_NUM_EPS(parms);
}

static void dwc3_cache_hwparams(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
	parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
	parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
	parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
	parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
	parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
	parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
	parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);

	if (DWC3_IP_IS(DWC32))
		parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
}

static void dwc3_config_soc_bus(struct dwc3 *dwc)
{
	if (dwc->gsbuscfg0_reqinfo != DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED) {
		u32 reg;

		reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
		reg &= ~DWC3_GSBUSCFG0_REQINFO(~0);
		reg |= DWC3_GSBUSCFG0_REQINFO(dwc->gsbuscfg0_reqinfo);
		dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
	}
}

static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
	int intf;
	int ret = 0;

	intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);

	if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
	    (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
	     dwc->hsphy_interface &&
	     !strncmp(dwc->hsphy_interface, "ulpi", 4)))
		ret = dwc3_ulpi_init(dwc);

	return ret;
}

static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(index));

	/*
	 * Make sure UX_EXIT_PX is cleared as that causes issues with some
	 * PHYs. Also, this bit is not supposed to be used in normal operation.
	 */
	reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;

	/* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */
	reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	if (dwc->u2ss_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;

	if (dwc->dis_rxdet_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;

	if (dwc->req_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;

	if (dwc->del_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;

	if (dwc->del_phy_power_chg_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;

	if (dwc->lfps_filter_quirk)
		reg |= DWC3_GUSB3PIPECTL_LFPSFILT;

	if (dwc->rx_detect_poll_quirk)
		reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;

	if (dwc->tx_de_emphasis_quirk)
		reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);

	if (dwc->dis_del_phy_power_chg_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(index), reg);

	return 0;
}

static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(index));

	/* Select the HS PHY interface */
	switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
	case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
		if (dwc->hsphy_interface &&
		    !strncmp(dwc->hsphy_interface, "utmi", 4)) {
			reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
			break;
		} else if (dwc->hsphy_interface &&
			   !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
			reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);
		} else {
			/* Relying on default value. */
			if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
				break;
		}
		fallthrough;
	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
	default:
		break;
	}

	switch (dwc->hsphy_mode) {
	case USBPHY_INTERFACE_MODE_UTMI:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
		break;
	case USBPHY_INTERFACE_MODE_UTMIW:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
		break;
	default:
		break;
	}

	/* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
	reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_enblslpm_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
	else
		reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;

	if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
		reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;

	/*
	 * Some ULPI USB PHYs do not support an internal VBUS supply; driving
	 * the CPEN pin requires the ULPI DRVVBUSEXTERNAL bit of the OTG_CTRL
	 * register to be configured. The controller sets the USB2 PHY
	 * ULPIEXTVBUSDRV bit [17] of the GUSB2PHYCFG register to drive VBUS
	 * with an external supply.
	 */
	if (dwc->ulpi_ext_vbus_drv)
		reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);

	return 0;
}

/**
 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success. The USB PHY interfaces are configured but not
 * initialized. The PHY interfaces and the PHYs get initialized together with
 * the core in dwc3_core_init.
 */
static int dwc3_phy_setup(struct dwc3 *dwc)
{
	int i;
	int ret;

	for (i = 0; i < dwc->num_usb3_ports; i++) {
		ret = dwc3_ss_phy_setup(dwc, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		ret = dwc3_hs_phy_setup(dwc, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int dwc3_phy_init(struct dwc3 *dwc)
{
	int ret;
	int i;
	int j;

	usb_phy_init(dwc->usb2_phy);
	usb_phy_init(dwc->usb3_phy);

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		ret = phy_init(dwc->usb2_generic_phy[i]);
		if (ret < 0)
			goto err_exit_usb2_phy;
	}

	for (j = 0; j < dwc->num_usb3_ports; j++) {
		ret = phy_init(dwc->usb3_generic_phy[j]);
		if (ret < 0)
			goto err_exit_usb3_phy;
	}

	/*
	 * Above DWC_usb3.0 1.94a, it is recommended to set
	 * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
	 * coreConsultant configuration. So default value will be '0' when the
	 * core is reset. Application needs to set it to '1' after the core
	 * initialization is completed.
	 *
	 * Certain PHYs require the P0 power state during initialization.
	 * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are
	 * clear prior to phy init so that the PHY stays in the P0 state.
	 *
	 * After phy initialization, some phy operations can only be executed
	 * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
	 * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
	 * blocking phy ops.
	 */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
		dwc3_enable_susphy(dwc, true);

	return 0;

err_exit_usb3_phy:
	while (--j >= 0)
		phy_exit(dwc->usb3_generic_phy[j]);

err_exit_usb2_phy:
	while (--i >= 0)
		phy_exit(dwc->usb2_generic_phy[i]);

	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);

	return ret;
}

static void dwc3_phy_exit(struct dwc3 *dwc)
{
	int i;

	for (i = 0; i < dwc->num_usb3_ports; i++)
		phy_exit(dwc->usb3_generic_phy[i]);

	for (i = 0; i < dwc->num_usb2_ports; i++)
		phy_exit(dwc->usb2_generic_phy[i]);

	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);
}

static int dwc3_phy_power_on(struct dwc3 *dwc)
{
	int ret;
	int i;
	int j;

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		ret = phy_power_on(dwc->usb2_generic_phy[i]);
		if (ret < 0)
			goto err_power_off_usb2_phy;
	}

	for (j = 0; j < dwc->num_usb3_ports; j++) {
		ret = phy_power_on(dwc->usb3_generic_phy[j]);
		if (ret < 0)
			goto err_power_off_usb3_phy;
	}

	return 0;

err_power_off_usb3_phy:
	while (--j >= 0)
		phy_power_off(dwc->usb3_generic_phy[j]);

err_power_off_usb2_phy:
	while (--i >= 0)
		phy_power_off(dwc->usb2_generic_phy[i]);

	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);

	return ret;
}

static void dwc3_phy_power_off(struct dwc3 *dwc)
{
	int i;

	for (i = 0; i < dwc->num_usb3_ports; i++)
		phy_power_off(dwc->usb3_generic_phy[i]);

	for (i = 0; i < dwc->num_usb2_ports; i++)
		phy_power_off(dwc->usb2_generic_phy[i]);

	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);
}

static int dwc3_clk_enable(struct dwc3 *dwc)
{
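	/*
	 * Illustrative note (not from the databook): clocks are enabled in
	 * the order bus, ref, suspend, utmi, pipe; a failure unwinds the
	 * already-enabled clocks in reverse order via the labels below.
	 */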
	int ret;

	ret = clk_prepare_enable(dwc->bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dwc->ref_clk);
	if (ret)
		goto disable_bus_clk;

	ret = clk_prepare_enable(dwc->susp_clk);
	if (ret)
		goto disable_ref_clk;

	ret = clk_prepare_enable(dwc->utmi_clk);
	if (ret)
		goto disable_susp_clk;

	ret = clk_prepare_enable(dwc->pipe_clk);
	if (ret)
		goto disable_utmi_clk;

	return 0;

disable_utmi_clk:
	clk_disable_unprepare(dwc->utmi_clk);
disable_susp_clk:
	clk_disable_unprepare(dwc->susp_clk);
disable_ref_clk:
	clk_disable_unprepare(dwc->ref_clk);
disable_bus_clk:
	clk_disable_unprepare(dwc->bus_clk);
	return ret;
}

static void dwc3_clk_disable(struct dwc3 *dwc)
{
	clk_disable_unprepare(dwc->pipe_clk);
	clk_disable_unprepare(dwc->utmi_clk);
	clk_disable_unprepare(dwc->susp_clk);
	clk_disable_unprepare(dwc->ref_clk);
	clk_disable_unprepare(dwc->bus_clk);
}

static void dwc3_core_exit(struct dwc3 *dwc)
{
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_clk_disable(dwc);
	reset_control_assert(dwc->reset);
}

static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
	dwc->ip = DWC3_GSNPS_ID(reg);

	/* This should read as U3 followed by revision number */
	if (DWC3_IP_IS(DWC3)) {
		dwc->revision = reg;
	} else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
		dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
	} else {
		return false;
	}

	return true;
}

static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
	unsigned int power_opt;
	unsigned int hw_mode;
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);

	switch (power_opt) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		/*
		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
		 * issue which would cause xHCI compliance tests to fail.
		 *
		 * Because of that we cannot enable clock gating on such
		 * configurations.
		 *
		 * Refers to:
		 *
		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
		 * SOF/ITP Mode Used
		 */
		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
		     dwc->dr_mode == USB_DR_MODE_OTG) &&
		    DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
		else
			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
		/*
		 * REVISIT Enabling this bit so that host-mode hibernation
		 * will work. Device-mode hibernation is not yet implemented.
		 */
		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
		break;
	default:
		/* nothing */
		break;
	}

	/*
	 * This is a workaround for STAR#4846132, which only affects
	 * DWC_usb31 version 2.00a operating in host mode.
	 *
	 * There is a problem in DWC_usb31 version 2.00a operating
	 * in host mode that causes a CSR read timeout when a CSR
	 * read coincides with RAM clock gating entry. Disable clock
	 * gating, sacrificing power consumption, to keep normal
	 * operation working.
	 */
	if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
	    hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
		reg |= DWC3_GCTL_DSBLCLKGTNG;

	/* check if current dwc3 is on simulation board */
	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
		dev_info(dwc->dev, "Running with FPGA optimizations\n");
		dwc->is_fpga = true;
	}

	WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
			"disable_scramble cannot be used on non-FPGA builds\n");

	if (dwc->disable_scramble_quirk && dwc->is_fpga)
		reg |= DWC3_GCTL_DISSCRAMBLE;
	else
		reg &= ~DWC3_GCTL_DISSCRAMBLE;

	if (dwc->u2exit_lfps_quirk)
		reg |= DWC3_GCTL_U2EXIT_LFPS;

	/*
	 * WORKAROUND: DWC3 revisions < 1.90a have a bug
	 * where the device can fail to connect at SuperSpeed
	 * and falls back to high-speed mode which causes
	 * the device to enter a Connect/Disconnect loop
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 190A))
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);

/* set global incr burst type configuration registers */
static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	/* incrx_mode : for INCR burst type. */
	bool incrx_mode;
	/* incrx_size : for size of INCRX burst. */
	u32 incrx_size;
	u32 *vals;
	u32 cfg;
	int ntype;
	int ret;
	int i;

	cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

	/*
	 * Handle property "snps,incr-burst-type-adjustment".
	 * Get the number of values from this property:
	 * result <= 0, means this property is not supported.
	 * result = 1, means INCRx burst mode supported.
	 * result > 1, means undefined length burst mode supported.
	 */
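	/*
	 * Illustrative examples (inferred from the parsing below, not quoted
	 * from the DT binding): a single cell such as <16> selects a fixed
	 * INCR16 burst, while a list such as <1 4 8 16> enables
	 * undefined-length INCR bursts and uses the largest listed value
	 * (here INCR16) as the maximum burst size to enable.
	 */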
	ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
	if (ntype <= 0)
		return;

	vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
	if (!vals)
		return;

	/* Get INCR burst type, and parse it */
	ret = device_property_read_u32_array(dev,
			"snps,incr-burst-type-adjustment", vals, ntype);
	if (ret) {
		kfree(vals);
		dev_err(dev, "Error to get property\n");
		return;
	}

	incrx_size = *vals;

	if (ntype > 1) {
		/* INCRX (undefined length) burst mode */
		incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
		for (i = 1; i < ntype; i++) {
			if (vals[i] > incrx_size)
				incrx_size = vals[i];
		}
	} else {
		/* INCRX burst mode */
		incrx_mode = INCRX_BURST_MODE;
	}

	kfree(vals);

	/* Enable Undefined Length INCR Burst and Enable INCRx Burst */
	cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
	if (incrx_mode)
		cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
	switch (incrx_size) {
	case 256:
		cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
		break;
	case 128:
		cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
		break;
	case 64:
		cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
		break;
	case 32:
		cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
		break;
	case 16:
		cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
		break;
	case 8:
		cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
		break;
	case 4:
		cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
		break;
	case 1:
		break;
	default:
		dev_err(dev, "Invalid property\n");
		break;
	}

	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
}

static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
{
	u32 scale;
	u32 reg;

	if (!dwc->susp_clk)
		return;

	/*
	 * The power down scale field specifies how many suspend_clk
	 * periods fit into a 16KHz clock period. When performing
	 * the division, round up the remainder.
	 *
	 * The power down scale value is calculated using the fastest
	 * frequency of the suspend_clk. If it isn't fixed (but within
	 * the accuracy requirement), the driver may not know the max
	 * rate of the suspend_clk, so only update the power down scale
	 * if the default is less than the calculated value from
	 * clk_get_rate() or if the default is questionably high
	 * (3x or more) to be within the requirement.
	 */
	scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
	    (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
		reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
		reg |= DWC3_GCTL_PWRDNSCALE(scale);
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}
}

static void dwc3_config_threshold(struct dwc3 *dwc)
{
	u32 reg;
	u8 rx_thr_num;
	u8 rx_maxburst;
	u8 tx_thr_num;
	u8 tx_maxburst;

	/*
	 * Both the number of packets and the max burst setting must be
	 * configured to enable the RX and/or TX threshold.
	 */
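	/*
	 * Hypothetical DT example (for illustration only): setting both
	 * snps,rx-thr-num-pkt-prd = <2> and snps,rx-max-burst-prd = <2>
	 * enables the periodic RX threshold programmed below; leaving
	 * either value at zero keeps it disabled.
	 */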
	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
		rx_thr_num = dwc->rx_thr_num_pkt_prd;
		rx_maxburst = dwc->rx_max_burst_prd;
		tx_thr_num = dwc->tx_thr_num_pkt_prd;
		tx_maxburst = dwc->tx_max_burst_prd;

		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_RXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);

			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_TXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);

			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}

	rx_thr_num = dwc->rx_thr_num_pkt;
	rx_maxburst = dwc->rx_max_burst;
	tx_thr_num = dwc->tx_thr_num_pkt;
	tx_maxburst = dwc->tx_max_burst;

	if (DWC3_IP_IS(DWC3)) {
		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC3_GRXTHRCFG_PKTCNTSEL;

			reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
			reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);

			reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
			reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC3_GTXTHRCFG_PKTCNTSEL;

			reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
			reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);

			reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
			reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	} else {
		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_GRXTHRCFG_PKTCNTSEL;

			reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
			reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);

			reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
			reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_GTXTHRCFG_PKTCNTSEL;

			reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
			reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);

			reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
			reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}
}

/**
 * dwc3_core_init - Low-level initialization of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_core_init(struct dwc3 *dwc)
{
	unsigned int hw_mode;
	u32 reg;
	int ret;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	/*
	 * Write the Linux version code to our GUID register so it's easy to
	 * figure out in which kernel version a bug was found.
	 */
	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);

	ret = dwc3_phy_setup(dwc);
	if (ret)
		return ret;

	if (!dwc->ulpi_ready) {
		ret = dwc3_core_ulpi_init(dwc);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				dwc3_core_soft_reset(dwc);
				ret = -EPROBE_DEFER;
			}
			return ret;
		}
		dwc->ulpi_ready = true;
	}

	if (!dwc->phys_ready) {
		ret = dwc3_core_get_phy(dwc);
		if (ret)
			goto err_exit_ulpi;
		dwc->phys_ready = true;
	}

	ret = dwc3_phy_init(dwc);
	if (ret)
		goto err_exit_ulpi;

	ret = dwc3_core_soft_reset(dwc);
	if (ret)
		goto err_exit_phy;

	dwc3_core_setup_global_control(dwc);
	dwc3_core_num_eps(dwc);

	/* Set power down scale of suspend_clk */
	dwc3_set_power_down_clk_scale(dwc);

	/* Adjust Frame Length */
	dwc3_frame_length_adjustment(dwc);

	/* Adjust Reference Clock Period */
	dwc3_ref_clk_period(dwc);

	dwc3_set_incr_burst_type(dwc);

	dwc3_config_soc_bus(dwc);

	ret = dwc3_phy_power_on(dwc);
	if (ret)
		goto err_exit_phy;

	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err_power_off_phy;
	}

	/*
	 * ENDXFER polling is available on version 3.10a and later of
	 * the DWC_usb3 controller. It is NOT available in the
	 * DWC_usb31 controller.
	 */
	if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
		reg |= DWC3_GUCTL2_RST_ACTBITLATER;
		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
	}

	/*
	 * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
	 * only. If the PM TIMER ECN is enabled through GUCTL2[19], the
	 * link compliance test (TD7.21) may fail. If the ECN is not
	 * enabled (GUCTL2[19] = 0), the controller will use the old timer
	 * value (5us), which is still acceptable for the link compliance
	 * test. Therefore, do not enable PM TIMER ECN in 3.20a by
	 * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0.
	 */
	if (DWC3_VER_IS(DWC3, 320A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
		reg &= ~DWC3_GUCTL2_LC_TIMER;
		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
	}

	/*
	 * When configured in host mode, the controller fails to send the
	 * proper CRC checksum in the CRC5 field after issuing a U3/L2 exit.
	 * Because of this behaviour a Transaction Error is generated,
	 * resulting in a reset and re-enumeration of the attached USB
	 * device. termsel, xcvrsel and opmode all become 0 at the end of
	 * resume. Enabling bit 10 of GUCTL1 corrects this problem. This
	 * option is to support certain legacy ULPI PHYs.
	 */
	if (dwc->resume_hs_terminations) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
		reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}

	if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);

		/*
		 * Enable hardware control of sending remote wakeup
		 * in HS when the device is in the L1 state.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
			reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;

		/*
		 * Decouple USB 2.0 L1 & L2 events which will allow for
		 * gadget driver to only receive U3/L2 suspend & wakeup
		 * events and prevent the more frequent L1 LPM transitions
		 * from interrupting the driver.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
			reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;

		if (dwc->dis_tx_ipgap_linecheck_quirk)
			reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;

		if (dwc->parkmode_disable_ss_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;

		if (dwc->parkmode_disable_hs_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;

		if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY)) {
			if (dwc->maximum_speed == USB_SPEED_FULL ||
			    dwc->maximum_speed == USB_SPEED_HIGH)
				reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
			else
				reg &= ~DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
		}

		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}

	dwc3_config_threshold(dwc);

	/*
	 * Modify this for all supported Super Speed ports when
	 * multiport support is added.
	 */
	if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
	    (DWC3_IP_IS(DWC31)) &&
	    dwc->maximum_speed == USB_SPEED_SUPER) {
		int i;

		for (i = 0; i < dwc->num_usb3_ports; i++) {
			reg = dwc3_readl(dwc->regs, DWC3_LLUCTL(i));
			reg |= DWC3_LLUCTL_FORCE_GEN1;
			dwc3_writel(dwc->regs, DWC3_LLUCTL(i), reg);
		}
	}

	/*
	 * STAR 9001346572: This issue affects DWC_usb31 versions 1.80a and
	 * prior. When an active endpoint not currently cached in the host
	 * controller is chosen to be cached to the same index as an endpoint
	 * receiving NAKs, the endpoint receiving NAKs enters continuous
	 * retry mode. This prevents it from being evicted from the host
	 * controller cache, blocking the new endpoint from being cached and
	 * serviced.
	 *
	 * To resolve this, for controller versions 1.70a and 1.80a, set the
	 * GUCTL3 bit[16] (USB2.0 Internal Retry Disable) to 1. This bit
	 * disables the USB2.0 internal retry feature. The GUCTL3[16] register
	 * function is available only from version 1.70a.
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, 170A, 180A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
		reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE;
		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
	}

	return 0;

err_power_off_phy:
	dwc3_phy_power_off(dwc);
err_exit_phy:
	dwc3_phy_exit(dwc);
err_exit_ulpi:
	dwc3_ulpi_exit(dwc);

	return ret;
}

static int dwc3_core_get_phy(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *node = dev->of_node;
	char phy_name[9];
	int ret;
	u8 i;

	if (node) {
		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
	} else {
		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
	}

	if (IS_ERR(dwc->usb2_phy)) {
		ret = PTR_ERR(dwc->usb2_phy);
		if (ret == -ENXIO || ret == -ENODEV)
			dwc->usb2_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
	}

	if (IS_ERR(dwc->usb3_phy)) {
		ret = PTR_ERR(dwc->usb3_phy);
		if (ret == -ENXIO || ret == -ENODEV)
			dwc->usb3_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
	}

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		if (dwc->num_usb2_ports == 1)
			snprintf(phy_name, sizeof(phy_name), "usb2-phy");
		else
			snprintf(phy_name, sizeof(phy_name), "usb2-%u", i);

		dwc->usb2_generic_phy[i] = devm_phy_get(dev, phy_name);
		if (IS_ERR(dwc->usb2_generic_phy[i])) {
			ret = PTR_ERR(dwc->usb2_generic_phy[i]);
			if (ret == -ENOSYS || ret == -ENODEV)
				dwc->usb2_generic_phy[i] = NULL;
			else
				return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
						     phy_name);
		}
	}

	for (i = 0; i < dwc->num_usb3_ports; i++) {
		if (dwc->num_usb3_ports == 1)
			snprintf(phy_name, sizeof(phy_name), "usb3-phy");
		else
			snprintf(phy_name, sizeof(phy_name), "usb3-%u", i);

		dwc->usb3_generic_phy[i] = devm_phy_get(dev, phy_name);
		if (IS_ERR(dwc->usb3_generic_phy[i])) {
			ret = PTR_ERR(dwc->usb3_generic_phy[i]);
			if (ret == -ENOSYS || ret == -ENODEV)
				dwc->usb3_generic_phy[i] = NULL;
			else
				return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
						     phy_name);
		}
	}

	return 0;
}

static int dwc3_core_init_mode(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret;
	int i;

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize gadget\n");
		break;
	case USB_DR_MODE_HOST:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, true);
		for (i = 0; i < dwc->num_usb2_ports; i++)
			phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
		for (i = 0; i < dwc->num_usb3_ports; i++)
			phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);

		ret = dwc3_host_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize host\n");
"failed to initialize host\n"); 1633 break; 1634 case USB_DR_MODE_OTG: 1635 INIT_WORK(&dwc->drd_work, __dwc3_set_mode); 1636 ret = dwc3_drd_init(dwc); 1637 if (ret) 1638 return dev_err_probe(dev, ret, "failed to initialize dual-role\n"); 1639 break; 1640 default: 1641 dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode); 1642 return -EINVAL; 1643 } 1644 1645 return 0; 1646 } 1647 1648 static void dwc3_core_exit_mode(struct dwc3 *dwc) 1649 { 1650 switch (dwc->dr_mode) { 1651 case USB_DR_MODE_PERIPHERAL: 1652 dwc3_gadget_exit(dwc); 1653 break; 1654 case USB_DR_MODE_HOST: 1655 dwc3_host_exit(dwc); 1656 break; 1657 case USB_DR_MODE_OTG: 1658 dwc3_drd_exit(dwc); 1659 break; 1660 default: 1661 /* do nothing */ 1662 break; 1663 } 1664 1665 /* de-assert DRVVBUS for HOST and OTG mode */ 1666 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); 1667 } 1668 1669 static void dwc3_get_software_properties(struct dwc3 *dwc) 1670 { 1671 struct device *tmpdev; 1672 u16 gsbuscfg0_reqinfo; 1673 int ret; 1674 1675 dwc->gsbuscfg0_reqinfo = DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED; 1676 1677 /* 1678 * Iterate over all parent nodes for finding swnode properties 1679 * and non-DT (non-ABI) properties. 1680 */ 1681 for (tmpdev = dwc->dev; tmpdev; tmpdev = tmpdev->parent) { 1682 ret = device_property_read_u16(tmpdev, 1683 "snps,gsbuscfg0-reqinfo", 1684 &gsbuscfg0_reqinfo); 1685 if (!ret) 1686 dwc->gsbuscfg0_reqinfo = gsbuscfg0_reqinfo; 1687 } 1688 } 1689 1690 static void dwc3_get_properties(struct dwc3 *dwc) 1691 { 1692 struct device *dev = dwc->dev; 1693 u8 lpm_nyet_threshold; 1694 u8 tx_de_emphasis; 1695 u8 hird_threshold; 1696 u8 rx_thr_num_pkt = 0; 1697 u8 rx_max_burst = 0; 1698 u8 tx_thr_num_pkt = 0; 1699 u8 tx_max_burst = 0; 1700 u8 rx_thr_num_pkt_prd = 0; 1701 u8 rx_max_burst_prd = 0; 1702 u8 tx_thr_num_pkt_prd = 0; 1703 u8 tx_max_burst_prd = 0; 1704 u8 tx_fifo_resize_max_num; 1705 u16 num_hc_interrupters; 1706 1707 /* default to highest possible threshold */ 1708 lpm_nyet_threshold = 0xf; 1709 1710 /* default to -3.5dB de-emphasis */ 1711 tx_de_emphasis = 1; 1712 1713 /* 1714 * default to assert utmi_sleep_n and use maximum allowed HIRD 1715 * threshold value of 0b1100 1716 */ 1717 hird_threshold = 12; 1718 1719 /* 1720 * default to a TXFIFO size large enough to fit 6 max packets. This 1721 * allows for systems with larger bus latencies to have some headroom 1722 * for endpoints that have a large bMaxBurst value. 
	 */
	tx_fifo_resize_max_num = 6;

	/* default to a single XHCI interrupter */
	num_hc_interrupters = 1;

	dwc->maximum_speed = usb_get_maximum_speed(dev);
	dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
	dwc->dr_mode = usb_get_dr_mode(dev);
	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);

	dwc->sysdev_is_parent = device_property_read_bool(dev,
				"linux,sysdev_is_parent");
	if (dwc->sysdev_is_parent)
		dwc->sysdev = dwc->dev->parent;
	else
		dwc->sysdev = dwc->dev;

	dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);

	dwc->has_lpm_erratum = device_property_read_bool(dev,
				"snps,has-lpm-erratum");
	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
				&lpm_nyet_threshold);
	dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
				"snps,is-utmi-l1-suspend");
	device_property_read_u8(dev, "snps,hird-threshold",
				&hird_threshold);
	dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
				"snps,dis-start-transfer-quirk");
	dwc->usb3_lpm_capable = device_property_read_bool(dev,
				"snps,usb3_lpm_capable");
	dwc->usb2_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-lpm-disable");
	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-gadget-lpm-disable");
	device_property_read_u8(dev, "snps,rx-thr-num-pkt",
				&rx_thr_num_pkt);
	device_property_read_u8(dev, "snps,rx-max-burst",
				&rx_max_burst);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt",
				&tx_thr_num_pkt);
	device_property_read_u8(dev, "snps,tx-max-burst",
				&tx_max_burst);
	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
				&rx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,rx-max-burst-prd",
				&rx_max_burst_prd);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
				&tx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,tx-max-burst-prd",
				&tx_max_burst_prd);
	device_property_read_u16(dev, "num-hc-interrupters",
				 &num_hc_interrupters);
	/* DWC3 core allowed to have a max of 8 interrupters */
	if (num_hc_interrupters > 8)
		num_hc_interrupters = 8;

	dwc->do_fifo_resize = device_property_read_bool(dev,
				"tx-fifo-resize");
	if (dwc->do_fifo_resize)
		device_property_read_u8(dev, "tx-fifo-max-num",
					&tx_fifo_resize_max_num);

	dwc->disable_scramble_quirk = device_property_read_bool(dev,
				"snps,disable_scramble_quirk");
	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
				"snps,u2exit_lfps_quirk");
	dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
				"snps,u2ss_inp3_quirk");
	dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,req_p1p2p3_quirk");
	dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,del_p1p2p3_quirk");
	dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,del_phy_power_chg_quirk");
	dwc->lfps_filter_quirk = device_property_read_bool(dev,
				"snps,lfps_filter_quirk");
	dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
				"snps,rx_detect_poll_quirk");
	dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u3_susphy_quirk");
	dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u2_susphy_quirk");
	dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
				"snps,dis_enblslpm_quirk");
	dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
"snps,dis-u1-entry-quirk"); 1811 dwc->dis_u2_entry_quirk = device_property_read_bool(dev, 1812 "snps,dis-u2-entry-quirk"); 1813 dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev, 1814 "snps,dis_rxdet_inp3_quirk"); 1815 dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev, 1816 "snps,dis-u2-freeclk-exists-quirk"); 1817 dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev, 1818 "snps,dis-del-phy-power-chg-quirk"); 1819 dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, 1820 "snps,dis-tx-ipgap-linecheck-quirk"); 1821 dwc->resume_hs_terminations = device_property_read_bool(dev, 1822 "snps,resume-hs-terminations"); 1823 dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev, 1824 "snps,ulpi-ext-vbus-drv"); 1825 dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, 1826 "snps,parkmode-disable-ss-quirk"); 1827 dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev, 1828 "snps,parkmode-disable-hs-quirk"); 1829 dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev, 1830 "snps,gfladj-refclk-lpm-sel-quirk"); 1831 1832 dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, 1833 "snps,tx_de_emphasis_quirk"); 1834 device_property_read_u8(dev, "snps,tx_de_emphasis", 1835 &tx_de_emphasis); 1836 device_property_read_string(dev, "snps,hsphy_interface", 1837 &dwc->hsphy_interface); 1838 device_property_read_u32(dev, "snps,quirk-frame-length-adjustment", 1839 &dwc->fladj); 1840 device_property_read_u32(dev, "snps,ref-clock-period-ns", 1841 &dwc->ref_clk_per); 1842 1843 dwc->dis_metastability_quirk = device_property_read_bool(dev, 1844 "snps,dis_metastability_quirk"); 1845 1846 dwc->dis_split_quirk = device_property_read_bool(dev, 1847 "snps,dis-split-quirk"); 1848 1849 dwc->lpm_nyet_threshold = lpm_nyet_threshold; 1850 dwc->tx_de_emphasis = tx_de_emphasis; 1851 1852 dwc->hird_threshold = hird_threshold; 1853 1854 dwc->rx_thr_num_pkt = rx_thr_num_pkt; 1855 dwc->rx_max_burst = rx_max_burst; 1856 1857 dwc->tx_thr_num_pkt = tx_thr_num_pkt; 1858 dwc->tx_max_burst = tx_max_burst; 1859 1860 dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd; 1861 dwc->rx_max_burst_prd = rx_max_burst_prd; 1862 1863 dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd; 1864 dwc->tx_max_burst_prd = tx_max_burst_prd; 1865 1866 dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num; 1867 1868 dwc->num_hc_interrupters = num_hc_interrupters; 1869 } 1870 1871 /* check whether the core supports IMOD */ 1872 bool dwc3_has_imod(struct dwc3 *dwc) 1873 { 1874 return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) || 1875 DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) || 1876 DWC3_IP_IS(DWC32); 1877 } 1878 1879 static void dwc3_check_params(struct dwc3 *dwc) 1880 { 1881 struct device *dev = dwc->dev; 1882 unsigned int hwparam_gen = 1883 DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); 1884 1885 /* 1886 * Enable IMOD for all supporting controllers. 1887 * 1888 * Particularly, DWC_usb3 v3.00a must enable this feature for 1889 * the following reason: 1890 * 1891 * Workaround for STAR 9000961433 which affects only version 1892 * 3.00a of the DWC_usb3 core. This prevents the controller 1893 * interrupt from being masked while handling events. IMOD 1894 * allows us to work around this issue. Enable it for the 1895 * affected version. 
	 */
	if (dwc3_has_imod(dwc))
		dwc->imod_interval = 1;

	/* Check the maximum_speed parameter */
	switch (dwc->maximum_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_SUPER:
		if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
			dev_warn(dev, "UDC doesn't support Gen 1\n");
		break;
	case USB_SPEED_SUPER_PLUS:
		if ((DWC3_IP_IS(DWC32) &&
		     hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
		    (!DWC3_IP_IS(DWC32) &&
		     hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
			dev_warn(dev, "UDC doesn't support SSP\n");
		break;
	default:
		dev_err(dev, "invalid maximum_speed parameter %d\n",
			dwc->maximum_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		switch (hwparam_gen) {
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
			if (DWC3_IP_IS(DWC32))
				dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			else
				dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
			dwc->maximum_speed = USB_SPEED_HIGH;
			break;
		default:
			dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		}
		break;
	}

	/*
	 * Currently the controller does not have visibility into the HW
	 * parameter to determine the maximum number of lanes the HW supports.
	 * If the number of lanes is not specified in the device property, then
	 * set the default to support dual-lane for DWC_usb32 and single-lane
	 * for DWC_usb31 for super-speed-plus.
	 */
	if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
		switch (dwc->max_ssp_rate) {
		case USB_SSP_GEN_2x1:
			if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
				dev_warn(dev, "UDC only supports Gen 1\n");
			break;
		case USB_SSP_GEN_1x2:
		case USB_SSP_GEN_2x2:
			if (DWC3_IP_IS(DWC31))
				dev_warn(dev, "UDC only supports single lane\n");
			break;
		case USB_SSP_GEN_UNKNOWN:
		default:
			switch (hwparam_gen) {
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_2x2;
				else
					dwc->max_ssp_rate = USB_SSP_GEN_2x1;
				break;
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_1x2;
				break;
			}
			break;
		}
	}
}

static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *np_phy;
	struct extcon_dev *edev = NULL;
	const char *name;

	if (device_property_present(dev, "extcon"))
		return extcon_get_edev_by_phandle(dev, 0);

	/*
	 * Device tree platforms should get extcon via phandle.
	 * On ACPI platforms, we get the name from a device property.
	 * This device property is for kernel internal use only and
	 * is expected to be set by the glue code.
	 */
	if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
		return extcon_get_extcon_dev(name);

	/*
	 * Check explicitly if "usb-role-switch" is used since
	 * extcon_find_edev_by_node() cannot be used to check the absence of
	 * an extcon device. In the absence of a device it will always return
	 * EPROBE_DEFER.
2002 */ 2003 if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) && 2004 device_property_read_bool(dev, "usb-role-switch")) 2005 return NULL; 2006 2007 /* 2008 * Try to get an extcon device from the USB PHY controller's "port" 2009 * node. Check if it has the "port" node first, to avoid printing the 2010 * error message from underlying code, as it's a valid case: extcon 2011 * device (and "port" node) may be missing in case of "usb-role-switch" 2012 * or OTG mode. 2013 */ 2014 np_phy = of_parse_phandle(dev->of_node, "phys", 0); 2015 if (of_graph_is_present(np_phy)) { 2016 struct device_node *np_conn; 2017 2018 np_conn = of_graph_get_remote_node(np_phy, -1, -1); 2019 if (np_conn) 2020 edev = extcon_find_edev_by_node(np_conn); 2021 of_node_put(np_conn); 2022 } 2023 of_node_put(np_phy); 2024 2025 return edev; 2026 } 2027 2028 static int dwc3_get_clocks(struct dwc3 *dwc) 2029 { 2030 struct device *dev = dwc->dev; 2031 2032 if (!dev->of_node) 2033 return 0; 2034 2035 /* 2036 * Clocks are optional, but new DT platforms should support all clocks 2037 * as required by the DT-binding. 2038 * Some devices have different clock names in legacy device trees, 2039 * check for them to retain backwards compatibility. 2040 */ 2041 dwc->bus_clk = devm_clk_get_optional(dev, "bus_early"); 2042 if (IS_ERR(dwc->bus_clk)) { 2043 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk), 2044 "could not get bus clock\n"); 2045 } 2046 2047 if (dwc->bus_clk == NULL) { 2048 dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk"); 2049 if (IS_ERR(dwc->bus_clk)) { 2050 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk), 2051 "could not get bus clock\n"); 2052 } 2053 } 2054 2055 dwc->ref_clk = devm_clk_get_optional(dev, "ref"); 2056 if (IS_ERR(dwc->ref_clk)) { 2057 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk), 2058 "could not get ref clock\n"); 2059 } 2060 2061 if (dwc->ref_clk == NULL) { 2062 dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk"); 2063 if (IS_ERR(dwc->ref_clk)) { 2064 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk), 2065 "could not get ref clock\n"); 2066 } 2067 } 2068 2069 dwc->susp_clk = devm_clk_get_optional(dev, "suspend"); 2070 if (IS_ERR(dwc->susp_clk)) { 2071 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk), 2072 "could not get suspend clock\n"); 2073 } 2074 2075 if (dwc->susp_clk == NULL) { 2076 dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk"); 2077 if (IS_ERR(dwc->susp_clk)) { 2078 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk), 2079 "could not get suspend clock\n"); 2080 } 2081 } 2082 2083 /* specific to Rockchip RK3588 */ 2084 dwc->utmi_clk = devm_clk_get_optional(dev, "utmi"); 2085 if (IS_ERR(dwc->utmi_clk)) { 2086 return dev_err_probe(dev, PTR_ERR(dwc->utmi_clk), 2087 "could not get utmi clock\n"); 2088 } 2089 2090 /* specific to Rockchip RK3588 */ 2091 dwc->pipe_clk = devm_clk_get_optional(dev, "pipe"); 2092 if (IS_ERR(dwc->pipe_clk)) { 2093 return dev_err_probe(dev, PTR_ERR(dwc->pipe_clk), 2094 "could not get pipe clock\n"); 2095 } 2096 2097 return 0; 2098 } 2099 2100 static int dwc3_get_num_ports(struct dwc3 *dwc) 2101 { 2102 void __iomem *base; 2103 u8 major_revision; 2104 u32 offset; 2105 u32 val; 2106 2107 /* 2108 * Remap xHCI address space to access XHCI ext cap regs since it is 2109 * needed to get information on number of ports present. 
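 *
 * Each xHCI "Supported Protocol" extended capability advertises a major
 * protocol revision (0x02 for USB 2, 0x03 for USB 3) together with a
 * compatible port count; the loop below walks those capabilities and
 * accumulates the counts per protocol.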
2110 */ 2111 base = ioremap(dwc->xhci_resources[0].start, 2112 resource_size(&dwc->xhci_resources[0])); 2113 if (!base) 2114 return -ENOMEM; 2115 2116 offset = 0; 2117 do { 2118 offset = xhci_find_next_ext_cap(base, offset, 2119 XHCI_EXT_CAPS_PROTOCOL); 2120 if (!offset) 2121 break; 2122 2123 val = readl(base + offset); 2124 major_revision = XHCI_EXT_PORT_MAJOR(val); 2125 2126 val = readl(base + offset + 0x08); 2127 if (major_revision == 0x03) { 2128 dwc->num_usb3_ports += XHCI_EXT_PORT_COUNT(val); 2129 } else if (major_revision <= 0x02) { 2130 dwc->num_usb2_ports += XHCI_EXT_PORT_COUNT(val); 2131 } else { 2132 dev_warn(dwc->dev, "unrecognized port major revision %d\n", 2133 major_revision); 2134 } 2135 } while (1); 2136 2137 dev_dbg(dwc->dev, "hs-ports: %u ss-ports: %u\n", 2138 dwc->num_usb2_ports, dwc->num_usb3_ports); 2139 2140 iounmap(base); 2141 2142 if (dwc->num_usb2_ports > DWC3_USB2_MAX_PORTS || 2143 dwc->num_usb3_ports > DWC3_USB3_MAX_PORTS) 2144 return -EINVAL; 2145 2146 return 0; 2147 } 2148 2149 static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc) 2150 { 2151 struct power_supply *usb_psy; 2152 const char *usb_psy_name; 2153 int ret; 2154 2155 ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name); 2156 if (ret < 0) 2157 return NULL; 2158 2159 usb_psy = power_supply_get_by_name(usb_psy_name); 2160 if (!usb_psy) 2161 return ERR_PTR(-EPROBE_DEFER); 2162 2163 return usb_psy; 2164 } 2165 2166 int dwc3_core_probe(const struct dwc3_probe_data *data) 2167 { 2168 struct dwc3 *dwc = data->dwc; 2169 struct device *dev = dwc->dev; 2170 struct resource dwc_res; 2171 unsigned int hw_mode; 2172 void __iomem *regs; 2173 struct resource *res = data->res; 2174 int ret; 2175 2176 dwc->xhci_resources[0].start = res->start; 2177 dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + 2178 DWC3_XHCI_REGS_END; 2179 dwc->xhci_resources[0].flags = res->flags; 2180 dwc->xhci_resources[0].name = res->name; 2181 2182 /* 2183 * Request memory region but exclude xHCI regs, 2184 * since it will be requested by the xhci-plat driver. 
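 *
 * The xHCI registers occupy the start of the device's MMIO window (up
 * to DWC3_XHCI_REGS_END), while the core's global registers begin at
 * DWC3_GLOBALS_REGS_START; that is why dwc_res.start is advanced below
 * (Realtek RTD SoCs use a different global-register offset).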
2185 */ 2186 dwc_res = *res; 2187 dwc_res.start += DWC3_GLOBALS_REGS_START; 2188 2189 if (dev->of_node) { 2190 struct device_node *parent = of_get_parent(dev->of_node); 2191 2192 if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) { 2193 dwc_res.start -= DWC3_GLOBALS_REGS_START; 2194 dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START; 2195 } 2196 2197 of_node_put(parent); 2198 } 2199 2200 regs = devm_ioremap_resource(dev, &dwc_res); 2201 if (IS_ERR(regs)) 2202 return PTR_ERR(regs); 2203 2204 dwc->regs = regs; 2205 dwc->regs_size = resource_size(&dwc_res); 2206 2207 dwc3_get_properties(dwc); 2208 2209 dwc3_get_software_properties(dwc); 2210 2211 dwc->usb_psy = dwc3_get_usb_power_supply(dwc); 2212 if (IS_ERR(dwc->usb_psy)) 2213 return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n"); 2214 2215 if (!data->ignore_clocks_and_resets) { 2216 dwc->reset = devm_reset_control_array_get_optional_shared(dev); 2217 if (IS_ERR(dwc->reset)) { 2218 ret = PTR_ERR(dwc->reset); 2219 goto err_put_psy; 2220 } 2221 2222 ret = dwc3_get_clocks(dwc); 2223 if (ret) 2224 goto err_put_psy; 2225 } 2226 2227 ret = reset_control_deassert(dwc->reset); 2228 if (ret) 2229 goto err_put_psy; 2230 2231 ret = dwc3_clk_enable(dwc); 2232 if (ret) 2233 goto err_assert_reset; 2234 2235 if (!dwc3_core_is_valid(dwc)) { 2236 dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); 2237 ret = -ENODEV; 2238 goto err_disable_clks; 2239 } 2240 2241 dev_set_drvdata(dev, dwc); 2242 dwc3_cache_hwparams(dwc); 2243 2244 if (!dwc->sysdev_is_parent && 2245 DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) { 2246 ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); 2247 if (ret) 2248 goto err_disable_clks; 2249 } 2250 2251 /* 2252 * Currently only DWC3 controllers that are host-only capable 2253 * can have more than one port. 
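 *
 * For DRD and peripheral-only configurations, fall back to a single
 * USB2 and a single USB3 port, which matches what single-port cores
 * provide.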
2254 */ 2255 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 2256 if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) { 2257 ret = dwc3_get_num_ports(dwc); 2258 if (ret) 2259 goto err_disable_clks; 2260 } else { 2261 dwc->num_usb2_ports = 1; 2262 dwc->num_usb3_ports = 1; 2263 } 2264 2265 spin_lock_init(&dwc->lock); 2266 mutex_init(&dwc->mutex); 2267 2268 pm_runtime_get_noresume(dev); 2269 pm_runtime_set_active(dev); 2270 pm_runtime_use_autosuspend(dev); 2271 pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY); 2272 pm_runtime_enable(dev); 2273 2274 pm_runtime_forbid(dev); 2275 2276 ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE); 2277 if (ret) { 2278 dev_err(dwc->dev, "failed to allocate event buffers\n"); 2279 ret = -ENOMEM; 2280 goto err_allow_rpm; 2281 } 2282 2283 dwc->edev = dwc3_get_extcon(dwc); 2284 if (IS_ERR(dwc->edev)) { 2285 ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n"); 2286 goto err_free_event_buffers; 2287 } 2288 2289 ret = dwc3_get_dr_mode(dwc); 2290 if (ret) 2291 goto err_free_event_buffers; 2292 2293 ret = dwc3_core_init(dwc); 2294 if (ret) { 2295 dev_err_probe(dev, ret, "failed to initialize core\n"); 2296 goto err_free_event_buffers; 2297 } 2298 2299 dwc3_check_params(dwc); 2300 dwc3_debugfs_init(dwc); 2301 2302 ret = dwc3_core_init_mode(dwc); 2303 if (ret) 2304 goto err_exit_debugfs; 2305 2306 pm_runtime_put(dev); 2307 2308 dma_set_max_seg_size(dev, UINT_MAX); 2309 2310 return 0; 2311 2312 err_exit_debugfs: 2313 dwc3_debugfs_exit(dwc); 2314 dwc3_event_buffers_cleanup(dwc); 2315 dwc3_phy_power_off(dwc); 2316 dwc3_phy_exit(dwc); 2317 dwc3_ulpi_exit(dwc); 2318 err_free_event_buffers: 2319 dwc3_free_event_buffers(dwc); 2320 err_allow_rpm: 2321 pm_runtime_allow(dev); 2322 pm_runtime_disable(dev); 2323 pm_runtime_dont_use_autosuspend(dev); 2324 pm_runtime_set_suspended(dev); 2325 pm_runtime_put_noidle(dev); 2326 err_disable_clks: 2327 dwc3_clk_disable(dwc); 2328 err_assert_reset: 2329 reset_control_assert(dwc->reset); 2330 err_put_psy: 2331 if (dwc->usb_psy) 2332 power_supply_put(dwc->usb_psy); 2333 2334 return ret; 2335 } 2336 EXPORT_SYMBOL_GPL(dwc3_core_probe); 2337 2338 static int dwc3_probe(struct platform_device *pdev) 2339 { 2340 struct dwc3_probe_data probe_data = {}; 2341 struct resource *res; 2342 struct dwc3 *dwc; 2343 2344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2345 if (!res) { 2346 dev_err(&pdev->dev, "missing memory resource\n"); 2347 return -ENODEV; 2348 } 2349 2350 dwc = devm_kzalloc(&pdev->dev, sizeof(*dwc), GFP_KERNEL); 2351 if (!dwc) 2352 return -ENOMEM; 2353 2354 dwc->dev = &pdev->dev; 2355 dwc->glue_ops = NULL; 2356 2357 probe_data.dwc = dwc; 2358 probe_data.res = res; 2359 2360 return dwc3_core_probe(&probe_data); 2361 } 2362 2363 void dwc3_core_remove(struct dwc3 *dwc) 2364 { 2365 pm_runtime_get_sync(dwc->dev); 2366 2367 dwc3_core_exit_mode(dwc); 2368 dwc3_debugfs_exit(dwc); 2369 2370 dwc3_core_exit(dwc); 2371 dwc3_ulpi_exit(dwc); 2372 2373 pm_runtime_allow(dwc->dev); 2374 pm_runtime_disable(dwc->dev); 2375 pm_runtime_dont_use_autosuspend(dwc->dev); 2376 pm_runtime_put_noidle(dwc->dev); 2377 /* 2378 * HACK: Clear the driver data, which is currently accessed by parent 2379 * glue drivers, before allowing the parent to suspend. 
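 *
 * Keep this ahead of the pm_runtime_set_suspended() call below so that
 * nothing on the glue side can pick up a pointer to a dwc3 that is in
 * the middle of being removed.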
2380 */ 2381 dev_set_drvdata(dwc->dev, NULL); 2382 pm_runtime_set_suspended(dwc->dev); 2383 2384 dwc3_free_event_buffers(dwc); 2385 2386 if (dwc->usb_psy) 2387 power_supply_put(dwc->usb_psy); 2388 } 2389 EXPORT_SYMBOL_GPL(dwc3_core_remove); 2390 2391 static void dwc3_remove(struct platform_device *pdev) 2392 { 2393 dwc3_core_remove(platform_get_drvdata(pdev)); 2394 } 2395 2396 #ifdef CONFIG_PM 2397 static int dwc3_core_init_for_resume(struct dwc3 *dwc) 2398 { 2399 int ret; 2400 2401 ret = reset_control_deassert(dwc->reset); 2402 if (ret) 2403 return ret; 2404 2405 ret = dwc3_clk_enable(dwc); 2406 if (ret) 2407 goto assert_reset; 2408 2409 ret = dwc3_core_init(dwc); 2410 if (ret) 2411 goto disable_clks; 2412 2413 return 0; 2414 2415 disable_clks: 2416 dwc3_clk_disable(dwc); 2417 assert_reset: 2418 reset_control_assert(dwc->reset); 2419 2420 return ret; 2421 } 2422 2423 static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) 2424 { 2425 u32 reg; 2426 int i; 2427 int ret; 2428 2429 if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) { 2430 dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) & 2431 DWC3_GUSB2PHYCFG_SUSPHY) || 2432 (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) & 2433 DWC3_GUSB3PIPECTL_SUSPHY); 2434 /* 2435 * TI AM62 platform requires SUSPHY to be 2436 * enabled for system suspend to work. 2437 */ 2438 if (!dwc->susphy_state) 2439 dwc3_enable_susphy(dwc, true); 2440 } 2441 2442 switch (dwc->current_dr_role) { 2443 case DWC3_GCTL_PRTCAP_DEVICE: 2444 if (pm_runtime_suspended(dwc->dev)) 2445 break; 2446 ret = dwc3_gadget_suspend(dwc); 2447 if (ret) 2448 return ret; 2449 synchronize_irq(dwc->irq_gadget); 2450 dwc3_core_exit(dwc); 2451 break; 2452 case DWC3_GCTL_PRTCAP_HOST: 2453 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) { 2454 dwc3_core_exit(dwc); 2455 break; 2456 } 2457 2458 /* Let controller to suspend HSPHY before PHY driver suspends */ 2459 if (dwc->dis_u2_susphy_quirk || 2460 dwc->dis_enblslpm_quirk) { 2461 for (i = 0; i < dwc->num_usb2_ports; i++) { 2462 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i)); 2463 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM | 2464 DWC3_GUSB2PHYCFG_SUSPHY; 2465 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg); 2466 } 2467 2468 /* Give some time for USB2 PHY to suspend */ 2469 usleep_range(5000, 6000); 2470 } 2471 2472 for (i = 0; i < dwc->num_usb2_ports; i++) 2473 phy_pm_runtime_put_sync(dwc->usb2_generic_phy[i]); 2474 for (i = 0; i < dwc->num_usb3_ports; i++) 2475 phy_pm_runtime_put_sync(dwc->usb3_generic_phy[i]); 2476 break; 2477 case DWC3_GCTL_PRTCAP_OTG: 2478 /* do nothing during runtime_suspend */ 2479 if (PMSG_IS_AUTO(msg)) 2480 break; 2481 2482 if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { 2483 ret = dwc3_gadget_suspend(dwc); 2484 if (ret) 2485 return ret; 2486 synchronize_irq(dwc->irq_gadget); 2487 } 2488 2489 dwc3_otg_exit(dwc); 2490 dwc3_core_exit(dwc); 2491 break; 2492 default: 2493 /* do nothing */ 2494 break; 2495 } 2496 2497 return 0; 2498 } 2499 2500 static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) 2501 { 2502 int ret; 2503 u32 reg; 2504 int i; 2505 2506 switch (dwc->current_dr_role) { 2507 case DWC3_GCTL_PRTCAP_DEVICE: 2508 ret = dwc3_core_init_for_resume(dwc); 2509 if (ret) 2510 return ret; 2511 2512 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); 2513 dwc3_gadget_resume(dwc); 2514 break; 2515 case DWC3_GCTL_PRTCAP_HOST: 2516 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) { 2517 ret = dwc3_core_init_for_resume(dwc); 2518 if (ret) 2519 return ret; 2520 
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true); 2521 break; 2522 } 2523 /* Restore GUSB2PHYCFG bits that were modified in suspend */ 2524 for (i = 0; i < dwc->num_usb2_ports; i++) { 2525 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i)); 2526 if (dwc->dis_u2_susphy_quirk) 2527 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 2528 2529 if (dwc->dis_enblslpm_quirk) 2530 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; 2531 2532 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg); 2533 } 2534 2535 for (i = 0; i < dwc->num_usb2_ports; i++) 2536 phy_pm_runtime_get_sync(dwc->usb2_generic_phy[i]); 2537 for (i = 0; i < dwc->num_usb3_ports; i++) 2538 phy_pm_runtime_get_sync(dwc->usb3_generic_phy[i]); 2539 break; 2540 case DWC3_GCTL_PRTCAP_OTG: 2541 /* nothing to do on runtime_resume */ 2542 if (PMSG_IS_AUTO(msg)) 2543 break; 2544 2545 ret = dwc3_core_init_for_resume(dwc); 2546 if (ret) 2547 return ret; 2548 2549 dwc3_set_prtcap(dwc, dwc->current_dr_role, true); 2550 2551 dwc3_otg_init(dwc); 2552 if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) { 2553 dwc3_otg_host_init(dwc); 2554 } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { 2555 dwc3_gadget_resume(dwc); 2556 } 2557 2558 break; 2559 default: 2560 /* do nothing */ 2561 break; 2562 } 2563 2564 if (!PMSG_IS_AUTO(msg)) { 2565 /* restore SUSPHY state to that before system suspend. */ 2566 dwc3_enable_susphy(dwc, dwc->susphy_state); 2567 } 2568 2569 return 0; 2570 } 2571 2572 static int dwc3_runtime_checks(struct dwc3 *dwc) 2573 { 2574 switch (dwc->current_dr_role) { 2575 case DWC3_GCTL_PRTCAP_DEVICE: 2576 if (dwc->connected) 2577 return -EBUSY; 2578 break; 2579 case DWC3_GCTL_PRTCAP_HOST: 2580 default: 2581 /* do nothing */ 2582 break; 2583 } 2584 2585 return 0; 2586 } 2587 2588 int dwc3_runtime_suspend(struct dwc3 *dwc) 2589 { 2590 int ret; 2591 2592 if (dwc3_runtime_checks(dwc)) 2593 return -EBUSY; 2594 2595 ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND); 2596 if (ret) 2597 return ret; 2598 2599 return 0; 2600 } 2601 EXPORT_SYMBOL_GPL(dwc3_runtime_suspend); 2602 2603 int dwc3_runtime_resume(struct dwc3 *dwc) 2604 { 2605 struct device *dev = dwc->dev; 2606 int ret; 2607 2608 ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME); 2609 if (ret) 2610 return ret; 2611 2612 switch (dwc->current_dr_role) { 2613 case DWC3_GCTL_PRTCAP_DEVICE: 2614 if (dwc->pending_events) { 2615 pm_runtime_put(dev); 2616 dwc->pending_events = false; 2617 enable_irq(dwc->irq_gadget); 2618 } 2619 break; 2620 case DWC3_GCTL_PRTCAP_HOST: 2621 default: 2622 /* do nothing */ 2623 break; 2624 } 2625 2626 pm_runtime_mark_last_busy(dev); 2627 2628 return 0; 2629 } 2630 EXPORT_SYMBOL_GPL(dwc3_runtime_resume); 2631 2632 int dwc3_runtime_idle(struct dwc3 *dwc) 2633 { 2634 struct device *dev = dwc->dev; 2635 2636 switch (dwc->current_dr_role) { 2637 case DWC3_GCTL_PRTCAP_DEVICE: 2638 if (dwc3_runtime_checks(dwc)) 2639 return -EBUSY; 2640 break; 2641 case DWC3_GCTL_PRTCAP_HOST: 2642 default: 2643 /* do nothing */ 2644 break; 2645 } 2646 2647 pm_runtime_mark_last_busy(dev); 2648 pm_runtime_autosuspend(dev); 2649 2650 return 0; 2651 } 2652 EXPORT_SYMBOL_GPL(dwc3_runtime_idle); 2653 2654 static int dwc3_plat_runtime_suspend(struct device *dev) 2655 { 2656 return dwc3_runtime_suspend(dev_get_drvdata(dev)); 2657 } 2658 2659 static int dwc3_plat_runtime_resume(struct device *dev) 2660 { 2661 return dwc3_runtime_resume(dev_get_drvdata(dev)); 2662 } 2663 2664 static int dwc3_plat_runtime_idle(struct device *dev) 2665 { 2666 return dwc3_runtime_idle(dev_get_drvdata(dev)); 2667 } 2668 #endif /* CONFIG_PM */ 2669 2670 
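/*
 * System sleep handlers. Besides being wired into dwc3's own dev_pm_ops
 * below, they are exported so that a glue driver providing its own
 * dev_pm_ops can forward to them, along these lines (illustrative
 * sketch only; the "foo" names are hypothetical):
 *
 *	static int foo_dwc3_suspend(struct device *dev)
 *	{
 *		struct foo_dwc3 *foo = dev_get_drvdata(dev);
 *
 *		return dwc3_pm_suspend(foo->dwc);
 *	}
 */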
#ifdef CONFIG_PM_SLEEP 2671 int dwc3_pm_suspend(struct dwc3 *dwc) 2672 { 2673 struct device *dev = dwc->dev; 2674 int ret; 2675 2676 ret = dwc3_suspend_common(dwc, PMSG_SUSPEND); 2677 if (ret) 2678 return ret; 2679 2680 pinctrl_pm_select_sleep_state(dev); 2681 2682 return 0; 2683 } 2684 EXPORT_SYMBOL_GPL(dwc3_pm_suspend); 2685 2686 int dwc3_pm_resume(struct dwc3 *dwc) 2687 { 2688 struct device *dev = dwc->dev; 2689 int ret = 0; 2690 2691 pinctrl_pm_select_default_state(dev); 2692 2693 pm_runtime_disable(dev); 2694 ret = pm_runtime_set_active(dev); 2695 if (ret) 2696 goto out; 2697 2698 ret = dwc3_resume_common(dwc, PMSG_RESUME); 2699 if (ret) 2700 pm_runtime_set_suspended(dev); 2701 2702 out: 2703 pm_runtime_enable(dev); 2704 2705 return ret; 2706 } 2707 EXPORT_SYMBOL_GPL(dwc3_pm_resume); 2708 2709 void dwc3_pm_complete(struct dwc3 *dwc) 2710 { 2711 u32 reg; 2712 2713 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST && 2714 dwc->dis_split_quirk) { 2715 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); 2716 reg |= DWC3_GUCTL3_SPLITDISABLE; 2717 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); 2718 } 2719 } 2720 EXPORT_SYMBOL_GPL(dwc3_pm_complete); 2721 2722 int dwc3_pm_prepare(struct dwc3 *dwc) 2723 { 2724 struct device *dev = dwc->dev; 2725 2726 /* 2727 * Indicate to the PM core that it may safely leave the device in 2728 * runtime suspend if runtime-suspended already in device mode. 2729 */ 2730 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE && 2731 pm_runtime_suspended(dev) && 2732 !dev_pinctrl(dev)) 2733 return 1; 2734 2735 return 0; 2736 } 2737 EXPORT_SYMBOL_GPL(dwc3_pm_prepare); 2738 2739 static int dwc3_plat_suspend(struct device *dev) 2740 { 2741 return dwc3_pm_suspend(dev_get_drvdata(dev)); 2742 } 2743 2744 static int dwc3_plat_resume(struct device *dev) 2745 { 2746 return dwc3_pm_resume(dev_get_drvdata(dev)); 2747 } 2748 2749 static void dwc3_plat_complete(struct device *dev) 2750 { 2751 dwc3_pm_complete(dev_get_drvdata(dev)); 2752 } 2753 2754 static int dwc3_plat_prepare(struct device *dev) 2755 { 2756 return dwc3_pm_prepare(dev_get_drvdata(dev)); 2757 } 2758 #else 2759 #define dwc3_plat_complete NULL 2760 #define dwc3_plat_prepare NULL 2761 #endif /* CONFIG_PM_SLEEP */ 2762 2763 static const struct dev_pm_ops dwc3_dev_pm_ops = { 2764 SET_SYSTEM_SLEEP_PM_OPS(dwc3_plat_suspend, dwc3_plat_resume) 2765 .complete = dwc3_plat_complete, 2766 .prepare = dwc3_plat_prepare, 2767 /* 2768 * Runtime suspend halts the controller on disconnection. It relies on 2769 * platforms with custom connection notification to start the controller 2770 * again. 
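 *
 * (dwc3_runtime_checks() refuses to suspend while a device-mode
 * connection is active, so autosuspend only takes effect once the
 * cable is disconnected.)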
2771 */ 2772 SET_RUNTIME_PM_OPS(dwc3_plat_runtime_suspend, dwc3_plat_runtime_resume, 2773 dwc3_plat_runtime_idle) 2774 }; 2775 2776 #ifdef CONFIG_OF 2777 static const struct of_device_id of_dwc3_match[] = { 2778 { 2779 .compatible = "snps,dwc3" 2780 }, 2781 { 2782 .compatible = "synopsys,dwc3" 2783 }, 2784 { }, 2785 }; 2786 MODULE_DEVICE_TABLE(of, of_dwc3_match); 2787 #endif 2788 2789 #ifdef CONFIG_ACPI 2790 2791 #define ACPI_ID_INTEL_BSW "808622B7" 2792 2793 static const struct acpi_device_id dwc3_acpi_match[] = { 2794 { ACPI_ID_INTEL_BSW, 0 }, 2795 { }, 2796 }; 2797 MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match); 2798 #endif 2799 2800 static struct platform_driver dwc3_driver = { 2801 .probe = dwc3_probe, 2802 .remove = dwc3_remove, 2803 .driver = { 2804 .name = "dwc3", 2805 .of_match_table = of_match_ptr(of_dwc3_match), 2806 .acpi_match_table = ACPI_PTR(dwc3_acpi_match), 2807 .pm = &dwc3_dev_pm_ops, 2808 }, 2809 }; 2810 2811 module_platform_driver(dwc3_driver); 2812 2813 MODULE_ALIAS("platform:dwc3"); 2814 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); 2815 MODULE_LICENSE("GPL v2"); 2816 MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver"); 2817