// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - DesignWare USB3 DRD Controller Core file
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/clk.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/bitfield.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

#include "debug.h"
#include "../host/xhci-ext-caps.h"

#define DWC3_DEFAULT_AUTOSUSPEND_DELAY	5000 /* ms */

/**
 * dwc3_get_dr_mode - Validates and sets dr_mode
 * @dwc: pointer to our context structure
 */
static int dwc3_get_dr_mode(struct dwc3 *dwc)
{
	enum usb_dr_mode mode;
	struct device *dev = dwc->dev;
	unsigned int hw_mode;

	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	mode = dwc->dr_mode;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	switch (hw_mode) {
	case DWC3_GHWPARAMS0_MODE_GADGET:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
			dev_err(dev,
				"Controller does not support host mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_PERIPHERAL;
		break;
	case DWC3_GHWPARAMS0_MODE_HOST:
		if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
			dev_err(dev,
				"Controller does not support device mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_HOST;
		break;
	default:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
			mode = USB_DR_MODE_HOST;
		else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
			mode = USB_DR_MODE_PERIPHERAL;

		/*
		 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
		 * mode. If the controller supports DRD but the dr_mode is not
		 * specified or set to OTG, then set the mode to peripheral.
		 */
		if (mode == USB_DR_MODE_OTG && !dwc->edev &&
		    (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
		     !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
		    !DWC3_VER_IS_PRIOR(DWC3, 330A))
			mode = USB_DR_MODE_PERIPHERAL;
	}

	if (mode != dwc->dr_mode) {
		dev_warn(dev,
			 "Configuration mismatch. dr_mode forced to %s\n",
			 mode == USB_DR_MODE_HOST ? "host" : "gadget");

		dwc->dr_mode = mode;
	}

	return 0;
}

void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
{
	u32 reg;
	int i;

	for (i = 0; i < dwc->num_usb3_ports; i++) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(i));
		if (enable && !dwc->dis_u3_susphy_quirk)
			reg |= DWC3_GUSB3PIPECTL_SUSPHY;
		else
			reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(i), reg);
	}

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
		if (enable && !dwc->dis_u2_susphy_quirk)
			reg |= DWC3_GUSB2PHYCFG_SUSPHY;
		else
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
	}
}

void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
{
	unsigned int hw_mode;
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);

	/*
	 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and
	 * GUSB2PHYCFG.SUSPHY should be cleared during mode switching,
	 * and they can be set after core initialization.
	 */
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) {
		if (DWC3_GCTL_PRTCAP(reg) != mode)
			dwc3_enable_susphy(dwc, false);
	}

	reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
	reg |= DWC3_GCTL_PRTCAPDIR(mode);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);

	dwc->current_dr_role = mode;
}

static void __dwc3_set_mode(struct work_struct *work)
{
	struct dwc3 *dwc = work_to_dwc(work);
	unsigned long flags;
	int ret;
	u32 reg;
	u32 desired_dr_role;
	int i;

	mutex_lock(&dwc->mutex);
	spin_lock_irqsave(&dwc->lock, flags);
	desired_dr_role = dwc->desired_dr_role;
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_get_sync(dwc->dev);

	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
		dwc3_otg_update(dwc, 0);

	if (!desired_dr_role)
		goto out;

	if (desired_dr_role == dwc->current_dr_role)
		goto out;

	if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
		goto out;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_HOST:
		dwc3_host_exit(dwc);
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_gadget_exit(dwc);
		dwc3_event_buffers_cleanup(dwc);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		dwc3_otg_exit(dwc);
		spin_lock_irqsave(&dwc->lock, flags);
		dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc3_otg_update(dwc, 1);
		break;
	default:
		break;
	}

	/*
	 * When current_dr_role is not set, there's no role switching.
	 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
	 */
	if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
			DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
			desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
		reg |= DWC3_GCTL_CORESOFTRESET;
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);

		/*
		 * Wait for internal clocks to synchronize. DWC_usb31 and
		 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
		 * keep it consistent across different IPs, let's wait up to
		 * 100ms before clearing GCTL.CORESOFTRESET.
		 */
		msleep(100);

		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
		reg &= ~DWC3_GCTL_CORESOFTRESET;
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_set_prtcap(dwc, desired_dr_role, false);

	spin_unlock_irqrestore(&dwc->lock, flags);

	switch (desired_dr_role) {
	case DWC3_GCTL_PRTCAP_HOST:
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dwc->dev, "failed to initialize host\n");
		} else {
			if (dwc->usb2_phy)
				otg_set_vbus(dwc->usb2_phy->otg, true);

			for (i = 0; i < dwc->num_usb2_ports; i++)
				phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
			for (i = 0; i < dwc->num_usb3_ports; i++)
				phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);

			if (dwc->dis_split_quirk) {
				reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
				reg |= DWC3_GUCTL3_SPLITDISABLE;
				dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
			}
		}
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_core_soft_reset(dwc);

		dwc3_event_buffers_setup(dwc);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			dev_err(dwc->dev, "failed to initialize peripheral\n");
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		dwc3_otg_init(dwc);
		dwc3_otg_update(dwc, 0);
		break;
	default:
		break;
	}

out:
	pm_runtime_mark_last_busy(dwc->dev);
	pm_runtime_put_autosuspend(dwc->dev);
	mutex_unlock(&dwc->mutex);
}

void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
	unsigned long flags;

	if (dwc->dr_mode != USB_DR_MODE_OTG)
		return;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->desired_dr_role = mode;
	spin_unlock_irqrestore(&dwc->lock, flags);

	queue_work(system_freezable_wq, &dwc->drd_work);
}

u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
			DWC3_GDBGFIFOSPACE_NUM(dep->number) |
			DWC3_GDBGFIFOSPACE_TYPE(type));

	reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);

	return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
}

/**
 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
 * @dwc: pointer to our context structure
 */
int dwc3_core_soft_reset(struct dwc3 *dwc)
{
	u32 reg;
	int retries = 1000;

	/*
	 * We're resetting only the device side because, if we're in host mode,
	 * XHCI driver will reset the host block. If dwc3 was configured for
	 * host-only mode, then we can return early.
	 */
	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg |= DWC3_DCTL_CSFTRST;
	reg &= ~DWC3_DCTL_RUN_STOP;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	/*
	 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit
	 * is cleared only after all the clocks are synchronized. This can
	 * take a little more than 50ms. Set the polling rate at 20ms
	 * for 10 times instead.
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
		retries = 10;

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		if (!(reg & DWC3_DCTL_CSFTRST))
			goto done;

		if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
			msleep(20);
		else
			udelay(1);
	} while (--retries);

	dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
	return -ETIMEDOUT;

done:
	/*
	 * For DWC_usb31 controller 1.80a and prior, once DCTL.CSFRST bit
	 * is cleared, we must wait at least 50ms before accessing the PHY
	 * domain (synchronization delay).
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
		msleep(50);

	return 0;
}

/*
 * dwc3_frame_length_adjustment - Adjusts frame length if required
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
	u32 reg;
	u32 dft;

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	if (dwc->fladj == 0)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	dft = reg & DWC3_GFLADJ_30MHZ_MASK;
	if (dft != dwc->fladj) {
		reg &= ~DWC3_GFLADJ_30MHZ_MASK;
		reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
		dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
	}
}

/**
 * dwc3_ref_clk_period - Reference clock period configuration
 *		Default reference clock period depends on hardware
 *		configuration. For systems with reference clock that differs
 *		from the default, this will set clock period in DWC3_GUCTL
 *		register.
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_ref_clk_period(struct dwc3 *dwc)
{
	unsigned long period;
	unsigned long fladj;
	unsigned long decr;
	unsigned long rate;
	u32 reg;

	if (dwc->ref_clk) {
		rate = clk_get_rate(dwc->ref_clk);
		if (!rate)
			return;
		period = NSEC_PER_SEC / rate;
	} else if (dwc->ref_clk_per) {
		period = dwc->ref_clk_per;
		rate = NSEC_PER_SEC / period;
	} else {
		return;
	}

	reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
	reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
	reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
	dwc3_writel(dwc->regs, DWC3_GUCTL, reg);

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	/*
	 * The calculation below is
	 *
	 *     125000 * (NSEC_PER_SEC / (rate * period) - 1)
	 *
	 * but rearranged for fixed-point arithmetic. The division must be
	 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
	 * neither does rate * period).
	 *
	 * Note that rate * period ~= NSEC_PER_SEC, minus the number of
	 * nanoseconds of error caused by the truncation which happened during
	 * the division when calculating rate or period (whichever one was
	 * derived from the other). We first calculate the relative error, then
	 * scale it to units of 8 ppm.
	 */
	fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
	fladj -= 125000;

	/*
	 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
	 */
	decr = 480000000 / rate;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
	     & ~DWC3_GFLADJ_240MHZDECR
	     & ~DWC3_GFLADJ_240MHZDECR_PLS1;
	reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
	     | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
	     | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);

	if (dwc->gfladj_refclk_lpm_sel)
		reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;

	dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}

/**
 * dwc3_free_one_event_buffer - Frees one event buffer
 * @dwc: Pointer to our controller context structure
 * @evt: Pointer to event buffer to be freed
 */
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt)
{
	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
}

/**
 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
 * @dwc: Pointer to our controller context structure
 * @length: size of the event buffer
 *
 * Returns a pointer to the allocated event buffer structure on success
 * otherwise ERR_PTR(errno).
 */
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
		unsigned int length)
{
	struct dwc3_event_buffer *evt;

	evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return ERR_PTR(-ENOMEM);

	evt->dwc = dwc;
	evt->length = length;
	evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
	if (!evt->cache)
		return ERR_PTR(-ENOMEM);

	evt->buf = dma_alloc_coherent(dwc->sysdev, length,
			&evt->dma, GFP_KERNEL);
	if (!evt->buf)
		return ERR_PTR(-ENOMEM);

	return evt;
}

/**
 * dwc3_free_event_buffers - frees all allocated event buffers
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_free_event_buffers(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;
	if (evt)
		dwc3_free_one_event_buffer(dwc, evt);
}

/**
 * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
 * @dwc: pointer to our controller context structure
 * @length: size of event buffer
 *
 * Returns 0 on success otherwise negative errno. In the error case, dwc
 * may contain some buffers allocated but not all which were requested.
 */
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
{
	struct dwc3_event_buffer *evt;
	unsigned int hw_mode;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
		dwc->ev_buf = NULL;
		return 0;
	}

	evt = dwc3_alloc_one_event_buffer(dwc, length);
	if (IS_ERR(evt)) {
		dev_err(dwc->dev, "can't allocate event buffer\n");
		return PTR_ERR(evt);
	}
	dwc->ev_buf = evt;

	return 0;
}

/**
 * dwc3_event_buffers_setup - setup our allocated event buffers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;
	u32 reg;

	if (!dwc->ev_buf)
		return 0;

	evt = dwc->ev_buf;
	evt->lpos = 0;
	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
			lower_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
			upper_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));

	/* Clear any stale event */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
	return 0;
}

void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;
	u32 reg;

	if (!dwc->ev_buf)
		return;
	/*
	 * Exynos platforms may not be able to access event buffer if the
	 * controller failed to halt on dwc3_core_exit().
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	if (!(reg & DWC3_DSTS_DEVCTRLHLT))
		return;

	evt = dwc->ev_buf;

	evt->lpos = 0;

	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
			| DWC3_GEVNTSIZ_SIZE(0));

	/* Clear any stale event */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
}

static void dwc3_core_num_eps(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	dwc->num_eps = DWC3_NUM_EPS(parms);
}

static void dwc3_cache_hwparams(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
	parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
	parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
	parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
	parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
	parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
	parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
	parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);

	if (DWC3_IP_IS(DWC32))
		parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
}

static void dwc3_config_soc_bus(struct dwc3 *dwc)
{
	if (dwc->gsbuscfg0_reqinfo != DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED) {
		u32 reg;

		reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
		reg &= ~DWC3_GSBUSCFG0_REQINFO(~0);
		reg |= DWC3_GSBUSCFG0_REQINFO(dwc->gsbuscfg0_reqinfo);
		dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
	}
}

static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
	int intf;
	int ret = 0;

	intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);

	if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
	    (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
	     dwc->hsphy_interface &&
	     !strncmp(dwc->hsphy_interface, "ulpi", 4)))
		ret = dwc3_ulpi_init(dwc);

	return ret;
}

static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(index));

	/*
	 * Make sure UX_EXIT_PX is cleared as that causes issues with some
	 * PHYs. Also, this bit is not supposed to be used in normal operation.
	 */
	reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;

	/* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */
	reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	if (dwc->u2ss_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;

	if (dwc->dis_rxdet_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;

	if (dwc->req_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;

	if (dwc->del_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;

	if (dwc->del_phy_power_chg_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;

	if (dwc->lfps_filter_quirk)
		reg |= DWC3_GUSB3PIPECTL_LFPSFILT;

	if (dwc->rx_detect_poll_quirk)
		reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;

	if (dwc->tx_de_emphasis_quirk)
		reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);

	if (dwc->dis_del_phy_power_chg_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(index), reg);

	return 0;
}

static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(index));

	/* Select the HS PHY interface */
	switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
	case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
		if (dwc->hsphy_interface &&
		    !strncmp(dwc->hsphy_interface, "utmi", 4)) {
			reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
			break;
		} else if (dwc->hsphy_interface &&
			   !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
			reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);
		} else {
			/* Relying on default value. */
			if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
				break;
		}
		fallthrough;
	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
	default:
		break;
	}

	switch (dwc->hsphy_mode) {
	case USBPHY_INTERFACE_MODE_UTMI:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
		break;
	case USBPHY_INTERFACE_MODE_UTMIW:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
		break;
	default:
		break;
	}

	/* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
	reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_enblslpm_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
	else
		reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;

	if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
		reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;

	/*
	 * Some ULPI USB PHYs do not support an internal VBUS supply and need
	 * the CPEN pin driven, which requires setting the DRVVBUSEXTERNAL bit
	 * in the ULPI OTG_CTRL register. The controller configures the USB2
	 * PHY through the ULPIEXTVBUSDRV bit[17] of the GUSB2PHYCFG register
	 * to drive VBUS with an external supply.
	 */
	if (dwc->ulpi_ext_vbus_drv)
		reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);

	return 0;
}

/**
 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success. The USB PHY interfaces are configured but not
 * initialized. The PHY interfaces and the PHYs get initialized together with
 * the core in dwc3_core_init.
 */
static int dwc3_phy_setup(struct dwc3 *dwc)
{
	int i;
	int ret;

	for (i = 0; i < dwc->num_usb3_ports; i++) {
		ret = dwc3_ss_phy_setup(dwc, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		ret = dwc3_hs_phy_setup(dwc, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int dwc3_phy_init(struct dwc3 *dwc)
{
	int ret;
	int i;
	int j;

	usb_phy_init(dwc->usb2_phy);
	usb_phy_init(dwc->usb3_phy);

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		ret = phy_init(dwc->usb2_generic_phy[i]);
		if (ret < 0)
			goto err_exit_usb2_phy;
	}

	for (j = 0; j < dwc->num_usb3_ports; j++) {
		ret = phy_init(dwc->usb3_generic_phy[j]);
		if (ret < 0)
			goto err_exit_usb3_phy;
	}

	/*
	 * Above DWC_usb3.0 1.94a, it is recommended to set
	 * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
	 * coreConsultant configuration. So default value will be '0' when the
	 * core is reset. Application needs to set it to '1' after the core
	 * initialization is completed.
	 *
	 * Certain phy requires to be in P0 power state during initialization.
	 * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear
	 * prior to phy init to maintain in the P0 state.
	 *
	 * After phy initialization, some phy operations can only be executed
	 * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
	 * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
	 * blocking phy ops.
	 */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
		dwc3_enable_susphy(dwc, true);

	return 0;

err_exit_usb3_phy:
	while (--j >= 0)
		phy_exit(dwc->usb3_generic_phy[j]);

err_exit_usb2_phy:
	while (--i >= 0)
		phy_exit(dwc->usb2_generic_phy[i]);

	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);

	return ret;
}

static void dwc3_phy_exit(struct dwc3 *dwc)
{
	int i;

	for (i = 0; i < dwc->num_usb3_ports; i++)
		phy_exit(dwc->usb3_generic_phy[i]);

	for (i = 0; i < dwc->num_usb2_ports; i++)
		phy_exit(dwc->usb2_generic_phy[i]);

	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);
}

static int dwc3_phy_power_on(struct dwc3 *dwc)
{
	int ret;
	int i;
	int j;

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		ret = phy_power_on(dwc->usb2_generic_phy[i]);
		if (ret < 0)
			goto err_power_off_usb2_phy;
	}

	for (j = 0; j < dwc->num_usb3_ports; j++) {
		ret = phy_power_on(dwc->usb3_generic_phy[j]);
		if (ret < 0)
			goto err_power_off_usb3_phy;
	}

	return 0;

err_power_off_usb3_phy:
	while (--j >= 0)
		phy_power_off(dwc->usb3_generic_phy[j]);

err_power_off_usb2_phy:
	while (--i >= 0)
		phy_power_off(dwc->usb2_generic_phy[i]);

	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);

	return ret;
}

static void dwc3_phy_power_off(struct dwc3 *dwc)
{
	int i;

	for (i = 0; i < dwc->num_usb3_ports; i++)
		phy_power_off(dwc->usb3_generic_phy[i]);

	for (i = 0; i < dwc->num_usb2_ports; i++)
		phy_power_off(dwc->usb2_generic_phy[i]);

	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);
}

static int dwc3_clk_enable(struct dwc3 *dwc)
{
	int ret;

	ret = clk_prepare_enable(dwc->bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dwc->ref_clk);
	if (ret)
		goto disable_bus_clk;

	ret = clk_prepare_enable(dwc->susp_clk);
	if (ret)
		goto disable_ref_clk;

	ret = clk_prepare_enable(dwc->utmi_clk);
	if (ret)
		goto disable_susp_clk;

	ret = clk_prepare_enable(dwc->pipe_clk);
	if (ret)
		goto disable_utmi_clk;

	return 0;

disable_utmi_clk:
	clk_disable_unprepare(dwc->utmi_clk);
disable_susp_clk:
	clk_disable_unprepare(dwc->susp_clk);
disable_ref_clk:
	clk_disable_unprepare(dwc->ref_clk);
disable_bus_clk:
	clk_disable_unprepare(dwc->bus_clk);
	return ret;
}

static void dwc3_clk_disable(struct dwc3 *dwc)
{
	clk_disable_unprepare(dwc->pipe_clk);
	clk_disable_unprepare(dwc->utmi_clk);
	clk_disable_unprepare(dwc->susp_clk);
	clk_disable_unprepare(dwc->ref_clk);
	clk_disable_unprepare(dwc->bus_clk);
}

static void dwc3_core_exit(struct dwc3 *dwc)
{
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_clk_disable(dwc);
	reset_control_assert(dwc->reset);
}

static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
	dwc->ip = DWC3_GSNPS_ID(reg);

	/* This should read as U3 followed by revision number */
	if (DWC3_IP_IS(DWC3)) {
		dwc->revision = reg;
	} else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
		dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
	} else {
		return false;
	}

	return true;
}

static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
	unsigned int power_opt;
	unsigned int hw_mode;
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);

	switch (power_opt) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		/**
		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
		 * issue which would cause xHCI compliance tests to fail.
		 *
		 * Because of that we cannot enable clock gating on such
		 * configurations.
		 *
		 * Refers to:
		 *
		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
		 * SOF/ITP Mode Used
		 */
		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
				dwc->dr_mode == USB_DR_MODE_OTG) &&
				DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
		else
			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
		/*
		 * REVISIT Enabling this bit so that host-mode hibernation
		 * will work. Device-mode hibernation is not yet implemented.
		 */
		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
		break;
	default:
		/* nothing */
		break;
	}

	/*
	 * This is a workaround for STAR#4846132, which only affects
	 * DWC_usb31 version 2.00a operating in host mode.
	 *
	 * There is a problem in DWC_usb31 version 2.00a operating
	 * in host mode that can cause a CSR read timeout when a CSR
	 * read coincides with RAM Clock Gating Entry. Disable Clock
	 * Gating, sacrificing power consumption for normal
	 * operation.
	 */
	if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
	    hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
		reg |= DWC3_GCTL_DSBLCLKGTNG;

	/* check if current dwc3 is on simulation board */
	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
		dev_info(dwc->dev, "Running with FPGA optimizations\n");
		dwc->is_fpga = true;
	}

	WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
			"disable_scramble cannot be used on non-FPGA builds\n");

	if (dwc->disable_scramble_quirk && dwc->is_fpga)
		reg |= DWC3_GCTL_DISSCRAMBLE;
	else
		reg &= ~DWC3_GCTL_DISSCRAMBLE;

	if (dwc->u2exit_lfps_quirk)
		reg |= DWC3_GCTL_U2EXIT_LFPS;

	/*
	 * WORKAROUND: DWC3 revisions <1.90a have a bug
	 * where the device can fail to connect at SuperSpeed
	 * and falls back to high-speed mode which causes
	 * the device to enter a Connect/Disconnect loop
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 190A))
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);

/* set global incr burst type configuration registers */
static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	/* incrx_mode : for INCR burst type. */
	bool incrx_mode;
	/* incrx_size : for size of INCRX burst. */
	u32 incrx_size;
	u32 *vals;
	u32 cfg;
	int ntype;
	int ret;
	int i;

	cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

	/*
	 * Handle property "snps,incr-burst-type-adjustment".
	 * Get the number of values from this property:
	 * result <= 0, means this property is not supported.
	 * result = 1, means INCRx burst mode supported.
	 * result > 1, means undefined length burst mode supported.
	 */
	ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
	if (ntype <= 0)
		return;

	vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
	if (!vals)
		return;

	/* Get INCR burst type, and parse it */
	ret = device_property_read_u32_array(dev,
			"snps,incr-burst-type-adjustment", vals, ntype);
	if (ret) {
		kfree(vals);
		dev_err(dev, "Error to get property\n");
		return;
	}

	incrx_size = *vals;

	if (ntype > 1) {
		/* INCRX (undefined length) burst mode */
		incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
		for (i = 1; i < ntype; i++) {
			if (vals[i] > incrx_size)
				incrx_size = vals[i];
		}
	} else {
		/* INCRX burst mode */
		incrx_mode = INCRX_BURST_MODE;
	}

	kfree(vals);

	/* Enable Undefined Length INCR Burst and Enable INCRx Burst */
	cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
	if (incrx_mode)
		cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
	switch (incrx_size) {
	case 256:
		cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
		break;
	case 128:
		cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
		break;
	case 64:
		cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
		break;
	case 32:
		cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
		break;
	case 16:
		cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
		break;
	case 8:
		cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
		break;
	case 4:
		cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
		break;
	case 1:
		break;
	default:
		dev_err(dev, "Invalid property\n");
		break;
	}

	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
}

static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
{
	u32 scale;
	u32 reg;

	if (!dwc->susp_clk)
		return;

	/*
	 * The power down scale field specifies how many suspend_clk
	 * periods fit into a 16KHz clock period. When performing
	 * the division, round up the remainder.
	 *
	 * The power down scale value is calculated using the fastest
	 * frequency of the suspend_clk. If it isn't fixed (but within
	 * the accuracy requirement), the driver may not know the max
	 * rate of the suspend_clk, so only update the power down scale
	 * if the default is less than the calculated value from
	 * clk_get_rate() or if the default is questionably high
	 * (3x or more) to be within the requirement.
	 */
	scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
	    (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
		reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
		reg |= DWC3_GCTL_PWRDNSCALE(scale);
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}
}

static void dwc3_config_threshold(struct dwc3 *dwc)
{
	u32 reg;
	u8 rx_thr_num;
	u8 rx_maxburst;
	u8 tx_thr_num;
	u8 tx_maxburst;

	/*
	 * Must config both number of packets and max burst settings to enable
	 * RX and/or TX threshold.
	 */
	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
		rx_thr_num = dwc->rx_thr_num_pkt_prd;
		rx_maxburst = dwc->rx_max_burst_prd;
		tx_thr_num = dwc->tx_thr_num_pkt_prd;
		tx_maxburst = dwc->tx_max_burst_prd;

		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_RXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);

			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_TXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);

			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}

	rx_thr_num = dwc->rx_thr_num_pkt;
	rx_maxburst = dwc->rx_max_burst;
	tx_thr_num = dwc->tx_thr_num_pkt;
	tx_maxburst = dwc->tx_max_burst;

	if (DWC3_IP_IS(DWC3)) {
		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC3_GRXTHRCFG_PKTCNTSEL;

			reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
			reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);

			reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
			reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC3_GTXTHRCFG_PKTCNTSEL;

			reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
			reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);

			reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
			reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	} else {
		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_GRXTHRCFG_PKTCNTSEL;

			reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
			reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);

			reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
			reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_GTXTHRCFG_PKTCNTSEL;

			reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
			reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);

			reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
			reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}
}

/**
 * dwc3_core_init - Low-level initialization of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_core_init(struct dwc3 *dwc)
{
	unsigned int hw_mode;
	u32 reg;
	int ret;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	/*
	 * Write Linux Version Code to our GUID register so it's easy to figure
	 * out which kernel version a bug was found.
	 */
	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);

	ret = dwc3_phy_setup(dwc);
	if (ret)
		return ret;

	if (!dwc->ulpi_ready) {
		ret = dwc3_core_ulpi_init(dwc);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				dwc3_core_soft_reset(dwc);
				ret = -EPROBE_DEFER;
			}
			return ret;
		}
		dwc->ulpi_ready = true;
	}

	if (!dwc->phys_ready) {
		ret = dwc3_core_get_phy(dwc);
		if (ret)
			goto err_exit_ulpi;
		dwc->phys_ready = true;
	}

	ret = dwc3_phy_init(dwc);
	if (ret)
		goto err_exit_ulpi;

	ret = dwc3_core_soft_reset(dwc);
	if (ret)
		goto err_exit_phy;

	dwc3_core_setup_global_control(dwc);
	dwc3_core_num_eps(dwc);

	/* Set power down scale of suspend_clk */
	dwc3_set_power_down_clk_scale(dwc);

	/* Adjust Frame Length */
	dwc3_frame_length_adjustment(dwc);

	/* Adjust Reference Clock Period */
	dwc3_ref_clk_period(dwc);

	dwc3_set_incr_burst_type(dwc);

	dwc3_config_soc_bus(dwc);

	ret = dwc3_phy_power_on(dwc);
	if (ret)
		goto err_exit_phy;

	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err_power_off_phy;
	}

	/*
	 * ENDXFER polling is available on version 3.10a and later of
	 * the DWC_usb3 controller. It is NOT available in the
	 * DWC_usb31 controller.
	 */
	if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
		reg |= DWC3_GUCTL2_RST_ACTBITLATER;
		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
	}

	/*
	 * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
	 * only. If the PM TIMER ECM is enabled through GUCTL2[19], the
	 * link compliance test (TD7.21) may fail. If the ECN is not
	 * enabled (GUCTL2[19] = 0), the controller will use the old timer
	 * value (5us), which is still acceptable for the link compliance
	 * test. Therefore, do not enable PM TIMER ECM in 3.20a by
	 * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0.
	 */
	if (DWC3_VER_IS(DWC3, 320A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
		reg &= ~DWC3_GUCTL2_LC_TIMER;
		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
	}

	/*
	 * When configured in HOST mode, after issuing U3/L2 exit controller
	 * fails to send proper CRC checksum in CRC5 field. Because of this
	 * behaviour Transaction Error is generated, resulting in reset and
	 * re-enumeration of usb device attached. All the termsel, xcvrsel,
	 * opmode becomes 0 during end of resume. Enabling bit 10 of GUCTL1
	 * will correct this problem. This option is to support certain
	 * legacy ULPI PHYs.
	 */
	if (dwc->resume_hs_terminations) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
		reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}

	if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);

		/*
		 * Enable hardware control of sending remote wakeup
		 * in HS when the device is in the L1 state.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
			reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;

		/*
		 * Decouple USB 2.0 L1 & L2 events which will allow for
		 * gadget driver to only receive U3/L2 suspend & wakeup
		 * events and prevent the more frequent L1 LPM transitions
		 * from interrupting the driver.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
			reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;

		if (dwc->dis_tx_ipgap_linecheck_quirk)
			reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;

		if (dwc->parkmode_disable_ss_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;

		if (dwc->parkmode_disable_hs_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;

		if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY)) {
			if (dwc->maximum_speed == USB_SPEED_FULL ||
			    dwc->maximum_speed == USB_SPEED_HIGH)
				reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
			else
				reg &= ~DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
		}

		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}

	dwc3_config_threshold(dwc);

	/*
	 * Modify this for all supported Super Speed ports when
	 * multiport support is added.
	 */
	if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
	    (DWC3_IP_IS(DWC31)) &&
	    dwc->maximum_speed == USB_SPEED_SUPER) {
		int i;

		for (i = 0; i < dwc->num_usb3_ports; i++) {
			reg = dwc3_readl(dwc->regs, DWC3_LLUCTL(i));
			reg |= DWC3_LLUCTL_FORCE_GEN1;
			dwc3_writel(dwc->regs, DWC3_LLUCTL(i), reg);
		}
	}

	/*
	 * STAR 9001346572: This issue affects DWC_usb31 versions 1.80a and
	 * prior. When an active endpoint not currently cached in the host
	 * controller is chosen to be cached to the same index as an endpoint
	 * receiving NAKs, the endpoint receiving NAKs enters continuous
	 * retry mode. This prevents it from being evicted from the host
	 * controller cache, blocking the new endpoint from being cached and
	 * serviced.
	 *
	 * To resolve this, for controller versions 1.70a and 1.80a, set the
	 * GUCTL3 bit[16] (USB2.0 Internal Retry Disable) to 1. This bit
	 * disables the USB2.0 internal retry feature. The GUCTL3[16] register
	 * function is available only from version 1.70a.
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, 170A, 180A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
		reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE;
		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
	}

	return 0;

err_power_off_phy:
	dwc3_phy_power_off(dwc);
err_exit_phy:
	dwc3_phy_exit(dwc);
err_exit_ulpi:
	dwc3_ulpi_exit(dwc);

	return ret;
}

static int dwc3_core_get_phy(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *node = dev->of_node;
	char phy_name[9];
	int ret;
	u8 i;

	if (node) {
		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
	} else {
		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
	}

	if (IS_ERR(dwc->usb2_phy)) {
		ret = PTR_ERR(dwc->usb2_phy);
		if (ret == -ENXIO || ret == -ENODEV)
			dwc->usb2_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
	}

	if (IS_ERR(dwc->usb3_phy)) {
		ret = PTR_ERR(dwc->usb3_phy);
		if (ret == -ENXIO || ret == -ENODEV)
			dwc->usb3_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
	}

	for (i = 0; i < dwc->num_usb2_ports; i++) {
		if (dwc->num_usb2_ports == 1)
			snprintf(phy_name, sizeof(phy_name), "usb2-phy");
		else
			snprintf(phy_name, sizeof(phy_name), "usb2-%u", i);

		dwc->usb2_generic_phy[i] = devm_phy_get(dev, phy_name);
		if (IS_ERR(dwc->usb2_generic_phy[i])) {
			ret = PTR_ERR(dwc->usb2_generic_phy[i]);
			if (ret == -ENOSYS || ret == -ENODEV)
				dwc->usb2_generic_phy[i] = NULL;
			else
				return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
						     phy_name);
		}
	}

	for (i = 0; i < dwc->num_usb3_ports; i++) {
		if (dwc->num_usb3_ports == 1)
			snprintf(phy_name, sizeof(phy_name), "usb3-phy");
		else
			snprintf(phy_name, sizeof(phy_name), "usb3-%u", i);

		dwc->usb3_generic_phy[i] = devm_phy_get(dev, phy_name);
		if (IS_ERR(dwc->usb3_generic_phy[i])) {
			ret = PTR_ERR(dwc->usb3_generic_phy[i]);
			if (ret == -ENOSYS || ret == -ENODEV)
				dwc->usb3_generic_phy[i] = NULL;
			else
				return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
						     phy_name);
		}
	}

	return 0;
}

static int dwc3_core_init_mode(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret;
	int i;

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize gadget\n");
		break;
	case USB_DR_MODE_HOST:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, true);
		for (i = 0; i < dwc->num_usb2_ports; i++)
			phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
		for (i = 0; i < dwc->num_usb3_ports; i++)
			phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);

		ret = dwc3_host_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret,
					     "failed to initialize host\n");
		break;
	case USB_DR_MODE_OTG:
		INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
		ret = dwc3_drd_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
		break;
	default:
		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
		return -EINVAL;
	}

	return 0;
}

static void dwc3_core_exit_mode(struct dwc3 *dwc)
{
	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_gadget_exit(dwc);
		break;
	case USB_DR_MODE_HOST:
		dwc3_host_exit(dwc);
		break;
	case USB_DR_MODE_OTG:
		dwc3_drd_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	/* de-assert DRVVBUS for HOST and OTG mode */
	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
}

static void dwc3_get_software_properties(struct dwc3 *dwc)
{
	struct device *tmpdev;
	u16 gsbuscfg0_reqinfo;
	int ret;

	dwc->gsbuscfg0_reqinfo = DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED;

	/*
	 * Iterate over all parent nodes for finding swnode properties
	 * and non-DT (non-ABI) properties.
	 */
	for (tmpdev = dwc->dev; tmpdev; tmpdev = tmpdev->parent) {
		ret = device_property_read_u16(tmpdev,
					       "snps,gsbuscfg0-reqinfo",
					       &gsbuscfg0_reqinfo);
		if (!ret)
			dwc->gsbuscfg0_reqinfo = gsbuscfg0_reqinfo;
	}
}

static void dwc3_get_properties(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	u8 lpm_nyet_threshold;
	u8 tx_de_emphasis;
	u8 hird_threshold;
	u8 rx_thr_num_pkt = 0;
	u8 rx_max_burst = 0;
	u8 tx_thr_num_pkt = 0;
	u8 tx_max_burst = 0;
	u8 rx_thr_num_pkt_prd = 0;
	u8 rx_max_burst_prd = 0;
	u8 tx_thr_num_pkt_prd = 0;
	u8 tx_max_burst_prd = 0;
	u8 tx_fifo_resize_max_num;

	/* default to highest possible threshold */
	lpm_nyet_threshold = 0xf;

	/* default to -3.5dB de-emphasis */
	tx_de_emphasis = 1;

	/*
	 * default to assert utmi_sleep_n and use maximum allowed HIRD
	 * threshold value of 0b1100
	 */
	hird_threshold = 12;

	/*
	 * default to a TXFIFO size large enough to fit 6 max packets. This
	 * allows for systems with larger bus latencies to have some headroom
	 * for endpoints that have a large bMaxBurst value.
	 */
	tx_fifo_resize_max_num = 6;

	dwc->maximum_speed = usb_get_maximum_speed(dev);
	dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
	dwc->dr_mode = usb_get_dr_mode(dev);
	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);

	dwc->sysdev_is_parent = device_property_read_bool(dev,
				"linux,sysdev_is_parent");
	if (dwc->sysdev_is_parent)
		dwc->sysdev = dwc->dev->parent;
	else
		dwc->sysdev = dwc->dev;

	dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);

	dwc->has_lpm_erratum = device_property_read_bool(dev,
				"snps,has-lpm-erratum");
	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
				&lpm_nyet_threshold);
	dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
				"snps,is-utmi-l1-suspend");
	device_property_read_u8(dev, "snps,hird-threshold",
				&hird_threshold);
	dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
				"snps,dis-start-transfer-quirk");
	dwc->usb3_lpm_capable = device_property_read_bool(dev,
				"snps,usb3_lpm_capable");
	dwc->usb2_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-lpm-disable");
	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-gadget-lpm-disable");
	device_property_read_u8(dev, "snps,rx-thr-num-pkt",
				&rx_thr_num_pkt);
	device_property_read_u8(dev, "snps,rx-max-burst",
				&rx_max_burst);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt",
				&tx_thr_num_pkt);
	device_property_read_u8(dev, "snps,tx-max-burst",
				&tx_max_burst);
	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
				&rx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,rx-max-burst-prd",
				&rx_max_burst_prd);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
				&tx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,tx-max-burst-prd",
				&tx_max_burst_prd);
	dwc->do_fifo_resize = device_property_read_bool(dev,
				"tx-fifo-resize");
	if (dwc->do_fifo_resize)
		device_property_read_u8(dev, "tx-fifo-max-num",
					&tx_fifo_resize_max_num);

	dwc->disable_scramble_quirk = device_property_read_bool(dev,
				"snps,disable_scramble_quirk");
	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
				"snps,u2exit_lfps_quirk");
	dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
				"snps,u2ss_inp3_quirk");
	dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,req_p1p2p3_quirk");
	dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,del_p1p2p3_quirk");
	dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,del_phy_power_chg_quirk");
	dwc->lfps_filter_quirk = device_property_read_bool(dev,
				"snps,lfps_filter_quirk");
	dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
				"snps,rx_detect_poll_quirk");
	dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u3_susphy_quirk");
	dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u2_susphy_quirk");
	dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
				"snps,dis_enblslpm_quirk");
	dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u1-entry-quirk");
	dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u2-entry-quirk");
	dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
				"snps,dis_rxdet_inp3_quirk");
	dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
				"snps,dis-u2-freeclk-exists-quirk");
	dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,dis-del-phy-power-chg-quirk");
	dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
				"snps,dis-tx-ipgap-linecheck-quirk");
	dwc->resume_hs_terminations = device_property_read_bool(dev,
				"snps,resume-hs-terminations");
	dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev,
				"snps,ulpi-ext-vbus-drv");
	dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
				"snps,parkmode-disable-ss-quirk");
	dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev,
				"snps,parkmode-disable-hs-quirk");
	dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
				"snps,gfladj-refclk-lpm-sel-quirk");

	dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
				"snps,tx_de_emphasis_quirk");
	device_property_read_u8(dev, "snps,tx_de_emphasis",
				&tx_de_emphasis);
	device_property_read_string(dev, "snps,hsphy_interface",
				    &dwc->hsphy_interface);
	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
				 &dwc->fladj);
	device_property_read_u32(dev, "snps,ref-clock-period-ns",
				 &dwc->ref_clk_per);

	dwc->dis_metastability_quirk = device_property_read_bool(dev,
				"snps,dis_metastability_quirk");

	dwc->dis_split_quirk = device_property_read_bool(dev,
				"snps,dis-split-quirk");

	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
	dwc->tx_de_emphasis = tx_de_emphasis;

	dwc->hird_threshold = hird_threshold;

	dwc->rx_thr_num_pkt = rx_thr_num_pkt;
	dwc->rx_max_burst = rx_max_burst;

	dwc->tx_thr_num_pkt = tx_thr_num_pkt;
	dwc->tx_max_burst = tx_max_burst;

	dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
	dwc->rx_max_burst_prd = rx_max_burst_prd;

	dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
	dwc->tx_max_burst_prd = tx_max_burst_prd;

	dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
}

/* check whether the core supports IMOD */
bool dwc3_has_imod(struct dwc3 *dwc)
{
	return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
		DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
		DWC3_IP_IS(DWC32);
}

static void dwc3_check_params(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	unsigned int hwparam_gen =
		DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);

	/*
	 * Enable IMOD for all supporting controllers.
	 *
	 * Particularly, DWC_usb3 v3.00a must enable this feature for
	 * the following reason:
	 *
	 * Workaround for STAR 9000961433 which affects only version
	 * 3.00a of the DWC_usb3 core. This prevents the controller
	 * interrupt from being masked while handling events. IMOD
	 * allows us to work around this issue. Enable it for the
	 * affected version.
	 */
	if (dwc3_has_imod(dwc))
		dwc->imod_interval = 1;

	/* Check the maximum_speed parameter */
	switch (dwc->maximum_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_SUPER:
		if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
			dev_warn(dev, "UDC doesn't support Gen 1\n");
		break;
	case USB_SPEED_SUPER_PLUS:
		if ((DWC3_IP_IS(DWC32) &&
		     hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
		    (!DWC3_IP_IS(DWC32) &&
		     hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
			dev_warn(dev, "UDC doesn't support SSP\n");
		break;
	default:
		dev_err(dev, "invalid maximum_speed parameter %d\n",
			dwc->maximum_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		switch (hwparam_gen) {
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
			if (DWC3_IP_IS(DWC32))
				dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			else
				dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
			dwc->maximum_speed = USB_SPEED_HIGH;
			break;
		default:
			dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		}
		break;
	}

	/*
	 * Currently the controller does not have visibility into the HW
	 * parameter to determine the maximum number of lanes the HW supports.
	 * If the number of lanes is not specified in the device property, then
	 * set the default to support dual-lane for DWC_usb32 and single-lane
	 * for DWC_usb31 for super-speed-plus.
	 */
	if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
		switch (dwc->max_ssp_rate) {
		case USB_SSP_GEN_2x1:
			if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
				dev_warn(dev, "UDC only supports Gen 1\n");
			break;
		case USB_SSP_GEN_1x2:
		case USB_SSP_GEN_2x2:
			if (DWC3_IP_IS(DWC31))
				dev_warn(dev, "UDC only supports single lane\n");
			break;
		case USB_SSP_GEN_UNKNOWN:
		default:
			switch (hwparam_gen) {
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_2x2;
				else
					dwc->max_ssp_rate = USB_SSP_GEN_2x1;
				break;
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_1x2;
				break;
			}
			break;
		}
	}
}

static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *np_phy;
	struct extcon_dev *edev = NULL;
	const char *name;

	if (device_property_present(dev, "extcon"))
		return extcon_get_edev_by_phandle(dev, 0);

	/*
	 * Device tree platforms should get extcon via phandle.
	 * On ACPI platforms, we get the name from a device property.
	 * This device property is for kernel internal use only and
	 * is expected to be set by the glue code.
	 */
	if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
		return extcon_get_extcon_dev(name);

	/*
	 * Check explicitly if "usb-role-switch" is used since
	 * extcon_find_edev_by_node() can not be used to check the absence of
	 * an extcon device. In the absence of a device it will always return
	 * EPROBE_DEFER.
	 */
	if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
	    device_property_read_bool(dev, "usb-role-switch"))
		return NULL;

	/*
	 * Try to get an extcon device from the USB PHY controller's "port"
	 * node. Check if it has the "port" node first, to avoid printing the
	 * error message from underlying code, as it's a valid case: extcon
	 * device (and "port" node) may be missing in case of "usb-role-switch"
	 * or OTG mode.
	 */
	np_phy = of_parse_phandle(dev->of_node, "phys", 0);
	if (of_graph_is_present(np_phy)) {
		struct device_node *np_conn;

		np_conn = of_graph_get_remote_node(np_phy, -1, -1);
		if (np_conn)
			edev = extcon_find_edev_by_node(np_conn);
		of_node_put(np_conn);
	}
	of_node_put(np_phy);

	return edev;
}

static int dwc3_get_clocks(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;

	if (!dev->of_node)
		return 0;

	/*
	 * Clocks are optional, but new DT platforms should support all clocks
	 * as required by the DT-binding.
	 * Some devices have different clock names in legacy device trees;
	 * check for them to retain backwards compatibility.
	 */
	dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
	if (IS_ERR(dwc->bus_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
				     "could not get bus clock\n");
	}

	if (dwc->bus_clk == NULL) {
		dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
		if (IS_ERR(dwc->bus_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
					     "could not get bus clock\n");
		}
	}

	dwc->ref_clk = devm_clk_get_optional(dev, "ref");
	if (IS_ERR(dwc->ref_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
				     "could not get ref clock\n");
	}

	if (dwc->ref_clk == NULL) {
		dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
		if (IS_ERR(dwc->ref_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
					     "could not get ref clock\n");
		}
	}

	dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
	if (IS_ERR(dwc->susp_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
				     "could not get suspend clock\n");
	}

	if (dwc->susp_clk == NULL) {
		dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
		if (IS_ERR(dwc->susp_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
					     "could not get suspend clock\n");
		}
	}

	/* specific to Rockchip RK3588 */
	dwc->utmi_clk = devm_clk_get_optional(dev, "utmi");
	if (IS_ERR(dwc->utmi_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->utmi_clk),
				     "could not get utmi clock\n");
	}

	/* specific to Rockchip RK3588 */
	dwc->pipe_clk = devm_clk_get_optional(dev, "pipe");
	if (IS_ERR(dwc->pipe_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->pipe_clk),
				     "could not get pipe clock\n");
	}

	return 0;
}

static int dwc3_get_num_ports(struct dwc3 *dwc)
{
	void __iomem *base;
	u8 major_revision;
	u32 offset;
	u32 val;

	/*
	 * Remap xHCI address space to access XHCI ext cap regs since it is
	 * needed to get information on number of ports present.
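	 * Ports are counted from the xHCI Supported Protocol capabilities;
	 * the protocol major revision distinguishes USB2 from USB3 ports.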
	 */
	base = ioremap(dwc->xhci_resources[0].start,
		       resource_size(&dwc->xhci_resources[0]));
	if (!base)
		return -ENOMEM;

	offset = 0;
	do {
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
		if (!offset)
			break;

		val = readl(base + offset);
		major_revision = XHCI_EXT_PORT_MAJOR(val);

		val = readl(base + offset + 0x08);
		if (major_revision == 0x03) {
			dwc->num_usb3_ports += XHCI_EXT_PORT_COUNT(val);
		} else if (major_revision <= 0x02) {
			dwc->num_usb2_ports += XHCI_EXT_PORT_COUNT(val);
		} else {
			dev_warn(dwc->dev, "unrecognized port major revision %d\n",
				 major_revision);
		}
	} while (1);

	dev_dbg(dwc->dev, "hs-ports: %u ss-ports: %u\n",
		dwc->num_usb2_ports, dwc->num_usb3_ports);

	iounmap(base);

	if (dwc->num_usb2_ports > DWC3_USB2_MAX_PORTS ||
	    dwc->num_usb3_ports > DWC3_USB3_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
{
	struct power_supply *usb_psy;
	const char *usb_psy_name;
	int ret;

	ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
	if (ret < 0)
		return NULL;

	usb_psy = power_supply_get_by_name(usb_psy_name);
	if (!usb_psy)
		return ERR_PTR(-EPROBE_DEFER);

	return usb_psy;
}

static int dwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res, dwc_res;
	unsigned int hw_mode;
	void __iomem *regs;
	struct dwc3 *dwc;
	int ret;

	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
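	 * The dwc3 global registers start at DWC3_GLOBALS_REGS_START within
	 * the controller's MMIO range.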
	 */
	dwc_res = *res;
	dwc_res.start += DWC3_GLOBALS_REGS_START;

	if (dev->of_node) {
		struct device_node *parent = of_get_parent(dev->of_node);

		if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
			dwc_res.start -= DWC3_GLOBALS_REGS_START;
			dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
		}

		of_node_put(parent);
	}

	regs = devm_ioremap_resource(dev, &dwc_res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dwc->regs = regs;
	dwc->regs_size = resource_size(&dwc_res);

	dwc3_get_properties(dwc);

	dwc3_get_software_properties(dwc);

	dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
	if (IS_ERR(dwc->usb_psy))
		return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");

	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
	if (IS_ERR(dwc->reset)) {
		ret = PTR_ERR(dwc->reset);
		goto err_put_psy;
	}

	ret = dwc3_get_clocks(dwc);
	if (ret)
		goto err_put_psy;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		goto err_put_psy;

	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto err_assert_reset;

	if (!dwc3_core_is_valid(dwc)) {
		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
		ret = -ENODEV;
		goto err_disable_clks;
	}

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	if (!dwc->sysdev_is_parent &&
	    DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
		if (ret)
			goto err_disable_clks;
	}

	/*
	 * Currently only DWC3 controllers that are host-only capable
	 * can have more than one port.
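	 * All other controllers are assumed to have a single USB2 and a
	 * single USB3 port.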
	 */
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
		ret = dwc3_get_num_ports(dwc);
		if (ret)
			goto err_disable_clks;
	} else {
		dwc->num_usb2_ports = 1;
		dwc->num_usb3_ports = 1;
	}

	spin_lock_init(&dwc->lock);
	mutex_init(&dwc->mutex);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);

	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err_allow_rpm;
	}

	dwc->edev = dwc3_get_extcon(dwc);
	if (IS_ERR(dwc->edev)) {
		ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
		goto err_free_event_buffers;
	}

	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err_free_event_buffers;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err_probe(dev, ret, "failed to initialize core\n");
		goto err_free_event_buffers;
	}

	dwc3_check_params(dwc);
	dwc3_debugfs_init(dwc);

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err_exit_debugfs;

	pm_runtime_put(dev);

	dma_set_max_seg_size(dev, UINT_MAX);

	return 0;

err_exit_debugfs:
	dwc3_debugfs_exit(dwc);
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_ulpi_exit(dwc);
err_free_event_buffers:
	dwc3_free_event_buffers(dwc);
err_allow_rpm:
	pm_runtime_allow(dev);
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_put_noidle(dev);
err_disable_clks:
	dwc3_clk_disable(dwc);
err_assert_reset:
	reset_control_assert(dwc->reset);
err_put_psy:
	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);

	return ret;
}

static void dwc3_remove(struct platform_device *pdev)
{
	struct dwc3 *dwc = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);

	dwc3_core_exit_mode(dwc);
	dwc3_debugfs_exit(dwc);

	dwc3_core_exit(dwc);
	dwc3_ulpi_exit(dwc);

	pm_runtime_allow(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	/*
	 * HACK: Clear the driver data, which is currently accessed by parent
	 * glue drivers, before allowing the parent to suspend.
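	 * The parent then sees a NULL pointer instead of a stale dwc3.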
	 */
	platform_set_drvdata(pdev, NULL);
	pm_runtime_set_suspended(&pdev->dev);

	dwc3_free_event_buffers(dwc);

	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);
}

#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
	int ret;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		return ret;

	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto assert_reset;

	ret = dwc3_core_init(dwc);
	if (ret)
		goto disable_clks;

	return 0;

disable_clks:
	dwc3_clk_disable(dwc);
assert_reset:
	reset_control_assert(dwc->reset);

	return ret;
}

static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
	u32 reg;
	int i;

	if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) {
		dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
				    DWC3_GUSB2PHYCFG_SUSPHY) ||
				    (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
				    DWC3_GUSB3PIPECTL_SUSPHY);
		/*
		 * TI AM62 platform requires SUSPHY to be
		 * enabled for system suspend to work.
		 */
		if (!dwc->susphy_state)
			dwc3_enable_susphy(dwc, true);
	}

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (pm_runtime_suspended(dwc->dev))
			break;
		dwc3_gadget_suspend(dwc);
		synchronize_irq(dwc->irq_gadget);
		dwc3_core_exit(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
			dwc3_core_exit(dwc);
			break;
		}

		/* Let the controller suspend the HSPHY before the PHY driver suspends */
		if (dwc->dis_u2_susphy_quirk ||
		    dwc->dis_enblslpm_quirk) {
			for (i = 0; i < dwc->num_usb2_ports; i++) {
				reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
				reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
					DWC3_GUSB2PHYCFG_SUSPHY;
				dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
			}

			/* Give some time for USB2 PHY to suspend */
			usleep_range(5000, 6000);
		}

		for (i = 0; i < dwc->num_usb2_ports; i++)
			phy_pm_runtime_put_sync(dwc->usb2_generic_phy[i]);
		for (i = 0; i < dwc->num_usb3_ports; i++)
			phy_pm_runtime_put_sync(dwc->usb3_generic_phy[i]);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* do nothing during runtime_suspend */
		if (PMSG_IS_AUTO(msg))
			break;

		if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			dwc3_gadget_suspend(dwc);
			synchronize_irq(dwc->irq_gadget);
		}

		dwc3_otg_exit(dwc);
		dwc3_core_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
	int ret;
	u32 reg;
	int i;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;

		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
		dwc3_gadget_resume(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
			ret = dwc3_core_init_for_resume(dwc);
			if (ret)
				return ret;
			dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true);
			break;
		}
		/* Restore GUSB2PHYCFG bits that were modified in suspend */
		for (i = 0; i < dwc->num_usb2_ports; i++) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
			if (dwc->dis_u2_susphy_quirk)
				reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

			if (dwc->dis_enblslpm_quirk)
				reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;

			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
		}

		for (i = 0; i < dwc->num_usb2_ports; i++)
			phy_pm_runtime_get_sync(dwc->usb2_generic_phy[i]);
		for (i = 0; i < dwc->num_usb3_ports; i++)
			phy_pm_runtime_get_sync(dwc->usb3_generic_phy[i]);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* nothing to do on runtime_resume */
		if (PMSG_IS_AUTO(msg))
			break;

		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;

		dwc3_set_prtcap(dwc, dwc->current_dr_role, true);

		dwc3_otg_init(dwc);
		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
			dwc3_otg_host_init(dwc);
		} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			dwc3_gadget_resume(dwc);
		}

		break;
	default:
		/* do nothing */
		break;
	}

	if (!PMSG_IS_AUTO(msg)) {
		/* Restore the SUSPHY state to its value before system suspend. */
		dwc3_enable_susphy(dwc, dwc->susphy_state);
	}

	return 0;
}

static int dwc3_runtime_checks(struct dwc3 *dwc)
{
	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc->connected)
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_runtime_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	if (dwc3_runtime_checks(dwc))
		return -EBUSY;

	ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
	if (ret)
		return ret;

	return 0;
}

static int dwc3_runtime_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
	if (ret)
		return ret;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc->pending_events) {
			pm_runtime_put(dwc->dev);
			dwc->pending_events = false;
			enable_irq(dwc->irq_gadget);
		}
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_mark_last_busy(dev);

	return 0;
}

static int dwc3_runtime_idle(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc3_runtime_checks(dwc))
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int dwc3_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
	if (ret)
		return ret;

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dwc3_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret = 0;

	pinctrl_pm_select_default_state(dev);

	pm_runtime_disable(dev);
	ret = pm_runtime_set_active(dev);
	if (ret)
		goto out;

	ret = dwc3_resume_common(dwc, PMSG_RESUME);
	if (ret)
		pm_runtime_set_suspended(dev);

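	/* Runtime PM is re-enabled below whether or not resume succeeded. */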
out:
	pm_runtime_enable(dev);

	return ret;
}

static void dwc3_complete(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	u32 reg;

	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
	    dwc->dis_split_quirk) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
		reg |= DWC3_GUCTL3_SPLITDISABLE;
		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
	}
}
#else
#define dwc3_complete NULL
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dwc3_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
	.complete = dwc3_complete,

	/*
	 * Runtime suspend halts the controller on disconnection. It relies on
	 * platforms with custom connection notification to start the controller
	 * again.
	 */
	SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
			   dwc3_runtime_idle)
};

#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
	{
		.compatible = "snps,dwc3"
	},
	{
		.compatible = "synopsys,dwc3"
	},
	{ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_match);
#endif

#ifdef CONFIG_ACPI

#define ACPI_ID_INTEL_BSW	"808622B7"

static const struct acpi_device_id dwc3_acpi_match[] = {
	{ ACPI_ID_INTEL_BSW, 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
#endif

static struct platform_driver dwc3_driver = {
	.probe		= dwc3_probe,
	.remove		= dwc3_remove,
	.driver		= {
		.name	= "dwc3",
		.of_match_table	= of_match_ptr(of_dwc3_match),
		.acpi_match_table = ACPI_PTR(dwc3_acpi_match),
		.pm	= &dwc3_dev_pm_ops,
	},
};

module_platform_driver(dwc3_driver);

MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");