1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * core.c - DesignWare USB3 DRD Controller Core file 4 * 5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com 6 * 7 * Authors: Felipe Balbi <balbi@ti.com>, 8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 9 */ 10 11 #include <linux/clk.h> 12 #include <linux/version.h> 13 #include <linux/module.h> 14 #include <linux/kernel.h> 15 #include <linux/slab.h> 16 #include <linux/spinlock.h> 17 #include <linux/platform_device.h> 18 #include <linux/pm_runtime.h> 19 #include <linux/interrupt.h> 20 #include <linux/ioport.h> 21 #include <linux/io.h> 22 #include <linux/list.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/of.h> 26 #include <linux/of_graph.h> 27 #include <linux/acpi.h> 28 #include <linux/pinctrl/consumer.h> 29 #include <linux/reset.h> 30 #include <linux/bitfield.h> 31 32 #include <linux/usb/ch9.h> 33 #include <linux/usb/gadget.h> 34 #include <linux/usb/of.h> 35 #include <linux/usb/otg.h> 36 37 #include "core.h" 38 #include "gadget.h" 39 #include "io.h" 40 41 #include "debug.h" 42 #include "../host/xhci-ext-caps.h" 43 44 #define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */ 45 46 /** 47 * dwc3_get_dr_mode - Validates and sets dr_mode 48 * @dwc: pointer to our context structure 49 */ 50 static int dwc3_get_dr_mode(struct dwc3 *dwc) 51 { 52 enum usb_dr_mode mode; 53 struct device *dev = dwc->dev; 54 unsigned int hw_mode; 55 56 if (dwc->dr_mode == USB_DR_MODE_UNKNOWN) 57 dwc->dr_mode = USB_DR_MODE_OTG; 58 59 mode = dwc->dr_mode; 60 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 61 62 switch (hw_mode) { 63 case DWC3_GHWPARAMS0_MODE_GADGET: 64 if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) { 65 dev_err(dev, 66 "Controller does not support host mode.\n"); 67 return -EINVAL; 68 } 69 mode = USB_DR_MODE_PERIPHERAL; 70 break; 71 case DWC3_GHWPARAMS0_MODE_HOST: 72 if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) { 73 dev_err(dev, 74 "Controller does not support device mode.\n"); 75 return -EINVAL; 76 } 77 mode = USB_DR_MODE_HOST; 78 break; 79 default: 80 if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) 81 mode = USB_DR_MODE_HOST; 82 else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) 83 mode = USB_DR_MODE_PERIPHERAL; 84 85 /* 86 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG 87 * mode. If the controller supports DRD but the dr_mode is not 88 * specified or set to OTG, then set the mode to peripheral. 89 */ 90 if (mode == USB_DR_MODE_OTG && !dwc->edev && 91 (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) || 92 !device_property_read_bool(dwc->dev, "usb-role-switch")) && 93 !DWC3_VER_IS_PRIOR(DWC3, 330A)) 94 mode = USB_DR_MODE_PERIPHERAL; 95 } 96 97 if (mode != dwc->dr_mode) { 98 dev_warn(dev, 99 "Configuration mismatch. dr_mode forced to %s\n", 100 mode == USB_DR_MODE_HOST ? 
"host" : "gadget"); 101 102 dwc->dr_mode = mode; 103 } 104 105 return 0; 106 } 107 108 void dwc3_enable_susphy(struct dwc3 *dwc, bool enable) 109 { 110 u32 reg; 111 int i; 112 113 for (i = 0; i < dwc->num_usb3_ports; i++) { 114 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(i)); 115 if (enable && !dwc->dis_u3_susphy_quirk) 116 reg |= DWC3_GUSB3PIPECTL_SUSPHY; 117 else 118 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; 119 120 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(i), reg); 121 } 122 123 for (i = 0; i < dwc->num_usb2_ports; i++) { 124 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i)); 125 if (enable && !dwc->dis_u2_susphy_quirk) 126 reg |= DWC3_GUSB2PHYCFG_SUSPHY; 127 else 128 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 129 130 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg); 131 } 132 } 133 134 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy) 135 { 136 unsigned int hw_mode; 137 u32 reg; 138 139 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 140 141 /* 142 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and 143 * GUSB2PHYCFG.SUSPHY should be cleared during mode switching, 144 * and they can be set after core initialization. 145 */ 146 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 147 if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) { 148 if (DWC3_GCTL_PRTCAP(reg) != mode) 149 dwc3_enable_susphy(dwc, false); 150 } 151 152 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); 153 reg |= DWC3_GCTL_PRTCAPDIR(mode); 154 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 155 156 dwc->current_dr_role = mode; 157 } 158 159 static void __dwc3_set_mode(struct work_struct *work) 160 { 161 struct dwc3 *dwc = work_to_dwc(work); 162 unsigned long flags; 163 int ret; 164 u32 reg; 165 u32 desired_dr_role; 166 int i; 167 168 mutex_lock(&dwc->mutex); 169 spin_lock_irqsave(&dwc->lock, flags); 170 desired_dr_role = dwc->desired_dr_role; 171 spin_unlock_irqrestore(&dwc->lock, flags); 172 173 pm_runtime_get_sync(dwc->dev); 174 175 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) 176 dwc3_otg_update(dwc, 0); 177 178 if (!desired_dr_role) 179 goto out; 180 181 if (desired_dr_role == dwc->current_dr_role) 182 goto out; 183 184 if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) 185 goto out; 186 187 switch (dwc->current_dr_role) { 188 case DWC3_GCTL_PRTCAP_HOST: 189 dwc3_host_exit(dwc); 190 break; 191 case DWC3_GCTL_PRTCAP_DEVICE: 192 dwc3_gadget_exit(dwc); 193 dwc3_event_buffers_cleanup(dwc); 194 break; 195 case DWC3_GCTL_PRTCAP_OTG: 196 dwc3_otg_exit(dwc); 197 spin_lock_irqsave(&dwc->lock, flags); 198 dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE; 199 spin_unlock_irqrestore(&dwc->lock, flags); 200 dwc3_otg_update(dwc, 1); 201 break; 202 default: 203 break; 204 } 205 206 /* 207 * When current_dr_role is not set, there's no role switching. 208 * Only perform GCTL.CoreSoftReset when there's DRD role switching. 209 */ 210 if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) || 211 DWC3_VER_IS_PRIOR(DWC31, 190A)) && 212 desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { 213 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 214 reg |= DWC3_GCTL_CORESOFTRESET; 215 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 216 217 /* 218 * Wait for internal clocks to synchronized. DWC_usb31 and 219 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To 220 * keep it consistent across different IPs, let's wait up to 221 * 100ms before clearing GCTL.CORESOFTRESET. 
222 */ 223 msleep(100); 224 225 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 226 reg &= ~DWC3_GCTL_CORESOFTRESET; 227 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 228 } 229 230 spin_lock_irqsave(&dwc->lock, flags); 231 232 dwc3_set_prtcap(dwc, desired_dr_role, false); 233 234 spin_unlock_irqrestore(&dwc->lock, flags); 235 236 switch (desired_dr_role) { 237 case DWC3_GCTL_PRTCAP_HOST: 238 ret = dwc3_host_init(dwc); 239 if (ret) { 240 dev_err(dwc->dev, "failed to initialize host\n"); 241 } else { 242 if (dwc->usb2_phy) 243 otg_set_vbus(dwc->usb2_phy->otg, true); 244 245 for (i = 0; i < dwc->num_usb2_ports; i++) 246 phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST); 247 for (i = 0; i < dwc->num_usb3_ports; i++) 248 phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST); 249 250 if (dwc->dis_split_quirk) { 251 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); 252 reg |= DWC3_GUCTL3_SPLITDISABLE; 253 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); 254 } 255 } 256 break; 257 case DWC3_GCTL_PRTCAP_DEVICE: 258 dwc3_core_soft_reset(dwc); 259 260 dwc3_event_buffers_setup(dwc); 261 262 if (dwc->usb2_phy) 263 otg_set_vbus(dwc->usb2_phy->otg, false); 264 phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE); 265 phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE); 266 267 ret = dwc3_gadget_init(dwc); 268 if (ret) 269 dev_err(dwc->dev, "failed to initialize peripheral\n"); 270 break; 271 case DWC3_GCTL_PRTCAP_OTG: 272 dwc3_otg_init(dwc); 273 dwc3_otg_update(dwc, 0); 274 break; 275 default: 276 break; 277 } 278 279 out: 280 pm_runtime_mark_last_busy(dwc->dev); 281 pm_runtime_put_autosuspend(dwc->dev); 282 mutex_unlock(&dwc->mutex); 283 } 284 285 void dwc3_set_mode(struct dwc3 *dwc, u32 mode) 286 { 287 unsigned long flags; 288 289 if (dwc->dr_mode != USB_DR_MODE_OTG) 290 return; 291 292 spin_lock_irqsave(&dwc->lock, flags); 293 dwc->desired_dr_role = mode; 294 spin_unlock_irqrestore(&dwc->lock, flags); 295 296 queue_work(system_freezable_wq, &dwc->drd_work); 297 } 298 299 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) 300 { 301 struct dwc3 *dwc = dep->dwc; 302 u32 reg; 303 304 dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE, 305 DWC3_GDBGFIFOSPACE_NUM(dep->number) | 306 DWC3_GDBGFIFOSPACE_TYPE(type)); 307 308 reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE); 309 310 return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg); 311 } 312 313 /** 314 * dwc3_core_soft_reset - Issues core soft reset and PHY reset 315 * @dwc: pointer to our context structure 316 */ 317 int dwc3_core_soft_reset(struct dwc3 *dwc) 318 { 319 u32 reg; 320 int retries = 1000; 321 322 /* 323 * We're resetting only the device side because, if we're in host mode, 324 * XHCI driver will reset the host block. If dwc3 was configured for 325 * host-only mode, then we can return early. 326 */ 327 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) 328 return 0; 329 330 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 331 reg |= DWC3_DCTL_CSFTRST; 332 reg &= ~DWC3_DCTL_RUN_STOP; 333 dwc3_gadget_dctl_write_safe(dwc, reg); 334 335 /* 336 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit 337 * is cleared only after all the clocks are synchronized. This can 338 * take a little more than 50ms. Set the polling rate at 20ms 339 * for 10 times instead. 
340 */ 341 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32)) 342 retries = 10; 343 344 do { 345 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 346 if (!(reg & DWC3_DCTL_CSFTRST)) 347 goto done; 348 349 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32)) 350 msleep(20); 351 else 352 udelay(1); 353 } while (--retries); 354 355 dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n"); 356 return -ETIMEDOUT; 357 358 done: 359 /* 360 * For DWC_usb31 controller 1.80a and prior, once DCTL.CSFRST bit 361 * is cleared, we must wait at least 50ms before accessing the PHY 362 * domain (synchronization delay). 363 */ 364 if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A)) 365 msleep(50); 366 367 return 0; 368 } 369 370 /* 371 * dwc3_frame_length_adjustment - Adjusts frame length if required 372 * @dwc3: Pointer to our controller context structure 373 */ 374 static void dwc3_frame_length_adjustment(struct dwc3 *dwc) 375 { 376 u32 reg; 377 u32 dft; 378 379 if (DWC3_VER_IS_PRIOR(DWC3, 250A)) 380 return; 381 382 if (dwc->fladj == 0) 383 return; 384 385 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); 386 dft = reg & DWC3_GFLADJ_30MHZ_MASK; 387 if (dft != dwc->fladj) { 388 reg &= ~DWC3_GFLADJ_30MHZ_MASK; 389 reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj; 390 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); 391 } 392 } 393 394 /** 395 * dwc3_ref_clk_period - Reference clock period configuration 396 * Default reference clock period depends on hardware 397 * configuration. For systems with reference clock that differs 398 * from the default, this will set clock period in DWC3_GUCTL 399 * register. 400 * @dwc: Pointer to our controller context structure 401 */ 402 static void dwc3_ref_clk_period(struct dwc3 *dwc) 403 { 404 unsigned long period; 405 unsigned long fladj; 406 unsigned long decr; 407 unsigned long rate; 408 u32 reg; 409 410 if (dwc->ref_clk) { 411 rate = clk_get_rate(dwc->ref_clk); 412 if (!rate) 413 return; 414 period = NSEC_PER_SEC / rate; 415 } else if (dwc->ref_clk_per) { 416 period = dwc->ref_clk_per; 417 rate = NSEC_PER_SEC / period; 418 } else { 419 return; 420 } 421 422 reg = dwc3_readl(dwc->regs, DWC3_GUCTL); 423 reg &= ~DWC3_GUCTL_REFCLKPER_MASK; 424 reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period); 425 dwc3_writel(dwc->regs, DWC3_GUCTL, reg); 426 427 if (DWC3_VER_IS_PRIOR(DWC3, 250A)) 428 return; 429 430 /* 431 * The calculation below is 432 * 433 * 125000 * (NSEC_PER_SEC / (rate * period) - 1) 434 * 435 * but rearranged for fixed-point arithmetic. The division must be 436 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and 437 * neither does rate * period). 438 * 439 * Note that rate * period ~= NSEC_PER_SECOND, minus the number of 440 * nanoseconds of error caused by the truncation which happened during 441 * the division when calculating rate or period (whichever one was 442 * derived from the other). We first calculate the relative error, then 443 * scale it to units of 8 ppm. 444 */ 445 fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period); 446 fladj -= 125000; 447 448 /* 449 * The documented 240MHz constant is scaled by 2 to get PLS1 as well. 
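	 *
	 * Worked example, assuming a 24 MHz reference clock:
	 * period = NSEC_PER_SEC / 24000000 = 41 ns,
	 * fladj = 125000 * NSEC_PER_SEC / (24000000 * 41) - 125000 = 2032,
	 * decr = 480000000 / 24000000 = 20, so 240MHZDECR is programmed
	 * with 10 and 240MHZDECR_PLS1 with 0.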
450 */ 451 decr = 480000000 / rate; 452 453 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); 454 reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK 455 & ~DWC3_GFLADJ_240MHZDECR 456 & ~DWC3_GFLADJ_240MHZDECR_PLS1; 457 reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj) 458 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1) 459 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1); 460 461 if (dwc->gfladj_refclk_lpm_sel) 462 reg |= DWC3_GFLADJ_REFCLK_LPM_SEL; 463 464 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); 465 } 466 467 /** 468 * dwc3_free_one_event_buffer - Frees one event buffer 469 * @dwc: Pointer to our controller context structure 470 * @evt: Pointer to event buffer to be freed 471 */ 472 static void dwc3_free_one_event_buffer(struct dwc3 *dwc, 473 struct dwc3_event_buffer *evt) 474 { 475 dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma); 476 } 477 478 /** 479 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure 480 * @dwc: Pointer to our controller context structure 481 * @length: size of the event buffer 482 * 483 * Returns a pointer to the allocated event buffer structure on success 484 * otherwise ERR_PTR(errno). 485 */ 486 static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc, 487 unsigned int length) 488 { 489 struct dwc3_event_buffer *evt; 490 491 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL); 492 if (!evt) 493 return ERR_PTR(-ENOMEM); 494 495 evt->dwc = dwc; 496 evt->length = length; 497 evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL); 498 if (!evt->cache) 499 return ERR_PTR(-ENOMEM); 500 501 evt->buf = dma_alloc_coherent(dwc->sysdev, length, 502 &evt->dma, GFP_KERNEL); 503 if (!evt->buf) 504 return ERR_PTR(-ENOMEM); 505 506 return evt; 507 } 508 509 /** 510 * dwc3_free_event_buffers - frees all allocated event buffers 511 * @dwc: Pointer to our controller context structure 512 */ 513 static void dwc3_free_event_buffers(struct dwc3 *dwc) 514 { 515 struct dwc3_event_buffer *evt; 516 517 evt = dwc->ev_buf; 518 if (evt) 519 dwc3_free_one_event_buffer(dwc, evt); 520 } 521 522 /** 523 * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length 524 * @dwc: pointer to our controller context structure 525 * @length: size of event buffer 526 * 527 * Returns 0 on success otherwise negative errno. In the error case, dwc 528 * may contain some buffers allocated but not all which were requested. 529 */ 530 static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length) 531 { 532 struct dwc3_event_buffer *evt; 533 unsigned int hw_mode; 534 535 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 536 if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) { 537 dwc->ev_buf = NULL; 538 return 0; 539 } 540 541 evt = dwc3_alloc_one_event_buffer(dwc, length); 542 if (IS_ERR(evt)) { 543 dev_err(dwc->dev, "can't allocate event buffer\n"); 544 return PTR_ERR(evt); 545 } 546 dwc->ev_buf = evt; 547 548 return 0; 549 } 550 551 /** 552 * dwc3_event_buffers_setup - setup our allocated event buffers 553 * @dwc: pointer to our controller context structure 554 * 555 * Returns 0 on success otherwise negative errno. 
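 *
 * In host-only configurations no event buffer is allocated (dwc->ev_buf is
 * NULL), so this function returns 0 without touching the GEVNT* registers.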
556 */ 557 int dwc3_event_buffers_setup(struct dwc3 *dwc) 558 { 559 struct dwc3_event_buffer *evt; 560 u32 reg; 561 562 if (!dwc->ev_buf) 563 return 0; 564 565 evt = dwc->ev_buf; 566 evt->lpos = 0; 567 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 568 lower_32_bits(evt->dma)); 569 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 570 upper_32_bits(evt->dma)); 571 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), 572 DWC3_GEVNTSIZ_SIZE(evt->length)); 573 574 /* Clear any stale event */ 575 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 576 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg); 577 return 0; 578 } 579 580 void dwc3_event_buffers_cleanup(struct dwc3 *dwc) 581 { 582 struct dwc3_event_buffer *evt; 583 u32 reg; 584 585 if (!dwc->ev_buf) 586 return; 587 /* 588 * Exynos platforms may not be able to access event buffer if the 589 * controller failed to halt on dwc3_core_exit(). 590 */ 591 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 592 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 593 return; 594 595 evt = dwc->ev_buf; 596 597 evt->lpos = 0; 598 599 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0); 600 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0); 601 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK 602 | DWC3_GEVNTSIZ_SIZE(0)); 603 604 /* Clear any stale event */ 605 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 606 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg); 607 } 608 609 static void dwc3_core_num_eps(struct dwc3 *dwc) 610 { 611 struct dwc3_hwparams *parms = &dwc->hwparams; 612 613 dwc->num_eps = DWC3_NUM_EPS(parms); 614 } 615 616 static void dwc3_cache_hwparams(struct dwc3 *dwc) 617 { 618 struct dwc3_hwparams *parms = &dwc->hwparams; 619 620 parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0); 621 parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1); 622 parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2); 623 parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3); 624 parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4); 625 parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5); 626 parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6); 627 parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7); 628 parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8); 629 630 if (DWC3_IP_IS(DWC32)) 631 parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9); 632 } 633 634 static void dwc3_config_soc_bus(struct dwc3 *dwc) 635 { 636 if (dwc->gsbuscfg0_reqinfo != DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED) { 637 u32 reg; 638 639 reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0); 640 reg &= ~DWC3_GSBUSCFG0_REQINFO(~0); 641 reg |= DWC3_GSBUSCFG0_REQINFO(dwc->gsbuscfg0_reqinfo); 642 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg); 643 } 644 } 645 646 static int dwc3_core_ulpi_init(struct dwc3 *dwc) 647 { 648 int intf; 649 int ret = 0; 650 651 intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3); 652 653 if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI || 654 (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI && 655 dwc->hsphy_interface && 656 !strncmp(dwc->hsphy_interface, "ulpi", 4))) 657 ret = dwc3_ulpi_init(dwc); 658 659 return ret; 660 } 661 662 static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index) 663 { 664 u32 reg; 665 666 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(index)); 667 668 /* 669 * Make sure UX_EXIT_PX is cleared as that causes issues with some 670 * PHYs. Also, this bit is not supposed to be used in normal operation. 671 */ 672 reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX; 673 674 /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. 
 */
	reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	if (dwc->u2ss_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;

	if (dwc->dis_rxdet_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;

	if (dwc->req_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;

	if (dwc->del_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;

	if (dwc->del_phy_power_chg_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;

	if (dwc->lfps_filter_quirk)
		reg |= DWC3_GUSB3PIPECTL_LFPSFILT;

	if (dwc->rx_detect_poll_quirk)
		reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;

	if (dwc->tx_de_emphasis_quirk)
		reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);

	if (dwc->dis_del_phy_power_chg_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(index), reg);

	return 0;
}

static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(index));

	/* Select the HS PHY interface */
	switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
	case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
		if (dwc->hsphy_interface &&
		    !strncmp(dwc->hsphy_interface, "utmi", 4)) {
			reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
			break;
		} else if (dwc->hsphy_interface &&
			   !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
			reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);
		} else {
			/* Relying on default value. */
			if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
				break;
		}
		fallthrough;
	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
	default:
		break;
	}

	switch (dwc->hsphy_mode) {
	case USBPHY_INTERFACE_MODE_UTMI:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
			 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
		break;
	case USBPHY_INTERFACE_MODE_UTMIW:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
			 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
		break;
	default:
		break;
	}

	/* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
	reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_enblslpm_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
	else
		reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;

	if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
		reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;

	/*
	 * Some ULPI USB PHYs do not support an internal VBUS supply and
	 * drive the CPEN pin only when the DRVVBUSEXTERNAL bit of the ULPI
	 * OTG_CTRL register is set. Setting ULPIEXTVBUSDRV (bit 17) in
	 * GUSB2PHYCFG tells the controller to configure the USB2 PHY to
	 * drive VBUS from an external supply.
	 */
	if (dwc->ulpi_ext_vbus_drv)
		reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);

	return 0;
}

/**
 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success. The USB PHY interfaces are configured but not
 * initialized. The PHY interfaces and the PHYs get initialized together with
 * the core in dwc3_core_init.
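 *
 * On multiport capable controllers the same setup is applied to every
 * USB3 and USB2 port instance (dwc->num_usb3_ports / dwc->num_usb2_ports).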
787 */ 788 static int dwc3_phy_setup(struct dwc3 *dwc) 789 { 790 int i; 791 int ret; 792 793 for (i = 0; i < dwc->num_usb3_ports; i++) { 794 ret = dwc3_ss_phy_setup(dwc, i); 795 if (ret) 796 return ret; 797 } 798 799 for (i = 0; i < dwc->num_usb2_ports; i++) { 800 ret = dwc3_hs_phy_setup(dwc, i); 801 if (ret) 802 return ret; 803 } 804 805 return 0; 806 } 807 808 static int dwc3_phy_init(struct dwc3 *dwc) 809 { 810 int ret; 811 int i; 812 int j; 813 814 usb_phy_init(dwc->usb2_phy); 815 usb_phy_init(dwc->usb3_phy); 816 817 for (i = 0; i < dwc->num_usb2_ports; i++) { 818 ret = phy_init(dwc->usb2_generic_phy[i]); 819 if (ret < 0) 820 goto err_exit_usb2_phy; 821 } 822 823 for (j = 0; j < dwc->num_usb3_ports; j++) { 824 ret = phy_init(dwc->usb3_generic_phy[j]); 825 if (ret < 0) 826 goto err_exit_usb3_phy; 827 } 828 829 /* 830 * Above DWC_usb3.0 1.94a, it is recommended to set 831 * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during 832 * coreConsultant configuration. So default value will be '0' when the 833 * core is reset. Application needs to set it to '1' after the core 834 * initialization is completed. 835 * 836 * Certain phy requires to be in P0 power state during initialization. 837 * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear 838 * prior to phy init to maintain in the P0 state. 839 * 840 * After phy initialization, some phy operations can only be executed 841 * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and 842 * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid 843 * blocking phy ops. 844 */ 845 if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) 846 dwc3_enable_susphy(dwc, true); 847 848 return 0; 849 850 err_exit_usb3_phy: 851 while (--j >= 0) 852 phy_exit(dwc->usb3_generic_phy[j]); 853 854 err_exit_usb2_phy: 855 while (--i >= 0) 856 phy_exit(dwc->usb2_generic_phy[i]); 857 858 usb_phy_shutdown(dwc->usb3_phy); 859 usb_phy_shutdown(dwc->usb2_phy); 860 861 return ret; 862 } 863 864 static void dwc3_phy_exit(struct dwc3 *dwc) 865 { 866 int i; 867 868 for (i = 0; i < dwc->num_usb3_ports; i++) 869 phy_exit(dwc->usb3_generic_phy[i]); 870 871 for (i = 0; i < dwc->num_usb2_ports; i++) 872 phy_exit(dwc->usb2_generic_phy[i]); 873 874 usb_phy_shutdown(dwc->usb3_phy); 875 usb_phy_shutdown(dwc->usb2_phy); 876 } 877 878 static int dwc3_phy_power_on(struct dwc3 *dwc) 879 { 880 int ret; 881 int i; 882 int j; 883 884 usb_phy_set_suspend(dwc->usb2_phy, 0); 885 usb_phy_set_suspend(dwc->usb3_phy, 0); 886 887 for (i = 0; i < dwc->num_usb2_ports; i++) { 888 ret = phy_power_on(dwc->usb2_generic_phy[i]); 889 if (ret < 0) 890 goto err_power_off_usb2_phy; 891 } 892 893 for (j = 0; j < dwc->num_usb3_ports; j++) { 894 ret = phy_power_on(dwc->usb3_generic_phy[j]); 895 if (ret < 0) 896 goto err_power_off_usb3_phy; 897 } 898 899 return 0; 900 901 err_power_off_usb3_phy: 902 while (--j >= 0) 903 phy_power_off(dwc->usb3_generic_phy[j]); 904 905 err_power_off_usb2_phy: 906 while (--i >= 0) 907 phy_power_off(dwc->usb2_generic_phy[i]); 908 909 usb_phy_set_suspend(dwc->usb3_phy, 1); 910 usb_phy_set_suspend(dwc->usb2_phy, 1); 911 912 return ret; 913 } 914 915 static void dwc3_phy_power_off(struct dwc3 *dwc) 916 { 917 int i; 918 919 for (i = 0; i < dwc->num_usb3_ports; i++) 920 phy_power_off(dwc->usb3_generic_phy[i]); 921 922 for (i = 0; i < dwc->num_usb2_ports; i++) 923 phy_power_off(dwc->usb2_generic_phy[i]); 924 925 usb_phy_set_suspend(dwc->usb3_phy, 1); 926 usb_phy_set_suspend(dwc->usb2_phy, 1); 927 } 928 929 static int dwc3_clk_enable(struct dwc3 *dwc) 930 { 
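	/*
	 * Clocks are enabled in the order bus -> ref -> suspend -> utmi ->
	 * pipe. On failure, the labels below unwind whatever has already
	 * been enabled in reverse order; dwc3_clk_disable() mirrors the
	 * same ordering on the teardown path.
	 */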
	int ret;

	ret = clk_prepare_enable(dwc->bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dwc->ref_clk);
	if (ret)
		goto disable_bus_clk;

	ret = clk_prepare_enable(dwc->susp_clk);
	if (ret)
		goto disable_ref_clk;

	ret = clk_prepare_enable(dwc->utmi_clk);
	if (ret)
		goto disable_susp_clk;

	ret = clk_prepare_enable(dwc->pipe_clk);
	if (ret)
		goto disable_utmi_clk;

	return 0;

disable_utmi_clk:
	clk_disable_unprepare(dwc->utmi_clk);
disable_susp_clk:
	clk_disable_unprepare(dwc->susp_clk);
disable_ref_clk:
	clk_disable_unprepare(dwc->ref_clk);
disable_bus_clk:
	clk_disable_unprepare(dwc->bus_clk);
	return ret;
}

static void dwc3_clk_disable(struct dwc3 *dwc)
{
	clk_disable_unprepare(dwc->pipe_clk);
	clk_disable_unprepare(dwc->utmi_clk);
	clk_disable_unprepare(dwc->susp_clk);
	clk_disable_unprepare(dwc->ref_clk);
	clk_disable_unprepare(dwc->bus_clk);
}

static void dwc3_core_exit(struct dwc3 *dwc)
{
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_clk_disable(dwc);
	reset_control_assert(dwc->reset);
}

static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
	dwc->ip = DWC3_GSNPS_ID(reg);

	/* This should read as U3 followed by revision number */
	if (DWC3_IP_IS(DWC3)) {
		dwc->revision = reg;
	} else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
		dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
	} else {
		return false;
	}

	return true;
}

static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
	unsigned int power_opt;
	unsigned int hw_mode;
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);

	switch (power_opt) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		/*
		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
		 * issue which would cause xHCI compliance tests to fail.
		 *
		 * Because of that we cannot enable clock gating on such
		 * configurations.
		 *
		 * Refers to:
		 *
		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
		 * SOF/ITP Mode Used
		 */
		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
				dwc->dr_mode == USB_DR_MODE_OTG) &&
				DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
		else
			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
		/*
		 * REVISIT Enabling this bit so that host-mode hibernation
		 * will work. Device-mode hibernation is not yet implemented.
		 */
		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
		break;
	default:
		/* nothing */
		break;
	}

	/*
	 * This is a workaround for STAR#4846132, which only affects
	 * DWC_usb31 version 2.00a operating in host mode.
	 *
	 * In that configuration, a CSR read that coincides with RAM Clock
	 * Gating Entry can time out. Disable clock gating to avoid this,
	 * trading some power consumption for correct
	 * operation.
 */
	if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
	    hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
		reg |= DWC3_GCTL_DSBLCLKGTNG;

	/* check whether this dwc3 instance is on an FPGA/simulation board */
	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
		dev_info(dwc->dev, "Running with FPGA optimizations\n");
		dwc->is_fpga = true;
	}

	WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
			"disable_scramble cannot be used on non-FPGA builds\n");

	if (dwc->disable_scramble_quirk && dwc->is_fpga)
		reg |= DWC3_GCTL_DISSCRAMBLE;
	else
		reg &= ~DWC3_GCTL_DISSCRAMBLE;

	if (dwc->u2exit_lfps_quirk)
		reg |= DWC3_GCTL_U2EXIT_LFPS;

	/*
	 * WORKAROUND: DWC3 revisions <1.90a have a bug where the device can
	 * fail to connect at SuperSpeed and fall back to high-speed mode,
	 * which causes the device to enter a Connect/Disconnect loop.
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 190A))
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);

/* set global incr burst type configuration registers */
static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	/* incrx_mode : for INCR burst type. */
	bool incrx_mode;
	/* incrx_size : for size of INCRX burst. */
	u32 incrx_size;
	u32 *vals;
	u32 cfg;
	int ntype;
	int ret;
	int i;

	cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

	/*
	 * Handle the "snps,incr-burst-type-adjustment" property. Get the
	 * number of values it carries:
	 * result <= 0 means the property is not supported;
	 * result = 1 means INCRx burst mode is supported;
	 * result > 1 means undefined length burst mode is supported.
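	 *
	 * For example, a device tree node could carry (illustrative values):
	 *
	 *   snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
	 *
	 * which selects undefined-length INCR bursts with INCR16 as the
	 * largest enabled burst size.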
1116 */ 1117 ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment"); 1118 if (ntype <= 0) 1119 return; 1120 1121 vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL); 1122 if (!vals) 1123 return; 1124 1125 /* Get INCR burst type, and parse it */ 1126 ret = device_property_read_u32_array(dev, 1127 "snps,incr-burst-type-adjustment", vals, ntype); 1128 if (ret) { 1129 kfree(vals); 1130 dev_err(dev, "Error to get property\n"); 1131 return; 1132 } 1133 1134 incrx_size = *vals; 1135 1136 if (ntype > 1) { 1137 /* INCRX (undefined length) burst mode */ 1138 incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE; 1139 for (i = 1; i < ntype; i++) { 1140 if (vals[i] > incrx_size) 1141 incrx_size = vals[i]; 1142 } 1143 } else { 1144 /* INCRX burst mode */ 1145 incrx_mode = INCRX_BURST_MODE; 1146 } 1147 1148 kfree(vals); 1149 1150 /* Enable Undefined Length INCR Burst and Enable INCRx Burst */ 1151 cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK; 1152 if (incrx_mode) 1153 cfg |= DWC3_GSBUSCFG0_INCRBRSTENA; 1154 switch (incrx_size) { 1155 case 256: 1156 cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA; 1157 break; 1158 case 128: 1159 cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA; 1160 break; 1161 case 64: 1162 cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA; 1163 break; 1164 case 32: 1165 cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA; 1166 break; 1167 case 16: 1168 cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA; 1169 break; 1170 case 8: 1171 cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA; 1172 break; 1173 case 4: 1174 cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA; 1175 break; 1176 case 1: 1177 break; 1178 default: 1179 dev_err(dev, "Invalid property\n"); 1180 break; 1181 } 1182 1183 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg); 1184 } 1185 1186 static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc) 1187 { 1188 u32 scale; 1189 u32 reg; 1190 1191 if (!dwc->susp_clk) 1192 return; 1193 1194 /* 1195 * The power down scale field specifies how many suspend_clk 1196 * periods fit into a 16KHz clock period. When performing 1197 * the division, round up the remainder. 1198 * 1199 * The power down scale value is calculated using the fastest 1200 * frequency of the suspend_clk. If it isn't fixed (but within 1201 * the accuracy requirement), the driver may not know the max 1202 * rate of the suspend_clk, so only update the power down scale 1203 * if the default is less than the calculated value from 1204 * clk_get_rate() or if the default is questionably high 1205 * (3x or more) to be within the requirement. 1206 */ 1207 scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000); 1208 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 1209 if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) || 1210 (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) { 1211 reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK); 1212 reg |= DWC3_GCTL_PWRDNSCALE(scale); 1213 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 1214 } 1215 } 1216 1217 static void dwc3_config_threshold(struct dwc3 *dwc) 1218 { 1219 u32 reg; 1220 u8 rx_thr_num; 1221 u8 rx_maxburst; 1222 u8 tx_thr_num; 1223 u8 tx_maxburst; 1224 1225 /* 1226 * Must config both number of packets and max burst settings to enable 1227 * RX and/or TX threshold. 
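	 *
	 * Both values come from the optional "snps,rx-thr-num-pkt" /
	 * "snps,rx-max-burst" properties (and their tx and -prd counterparts)
	 * parsed in dwc3_get_properties(); if either one is left at zero the
	 * corresponding threshold register is not touched.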
1228 */ 1229 if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) { 1230 rx_thr_num = dwc->rx_thr_num_pkt_prd; 1231 rx_maxburst = dwc->rx_max_burst_prd; 1232 tx_thr_num = dwc->tx_thr_num_pkt_prd; 1233 tx_maxburst = dwc->tx_max_burst_prd; 1234 1235 if (rx_thr_num && rx_maxburst) { 1236 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1237 reg |= DWC31_RXTHRNUMPKTSEL_PRD; 1238 1239 reg &= ~DWC31_RXTHRNUMPKT_PRD(~0); 1240 reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num); 1241 1242 reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0); 1243 reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst); 1244 1245 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1246 } 1247 1248 if (tx_thr_num && tx_maxburst) { 1249 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG); 1250 reg |= DWC31_TXTHRNUMPKTSEL_PRD; 1251 1252 reg &= ~DWC31_TXTHRNUMPKT_PRD(~0); 1253 reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num); 1254 1255 reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0); 1256 reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst); 1257 1258 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg); 1259 } 1260 } 1261 1262 rx_thr_num = dwc->rx_thr_num_pkt; 1263 rx_maxburst = dwc->rx_max_burst; 1264 tx_thr_num = dwc->tx_thr_num_pkt; 1265 tx_maxburst = dwc->tx_max_burst; 1266 1267 if (DWC3_IP_IS(DWC3)) { 1268 if (rx_thr_num && rx_maxburst) { 1269 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1270 reg |= DWC3_GRXTHRCFG_PKTCNTSEL; 1271 1272 reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0); 1273 reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num); 1274 1275 reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0); 1276 reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst); 1277 1278 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1279 } 1280 1281 if (tx_thr_num && tx_maxburst) { 1282 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG); 1283 reg |= DWC3_GTXTHRCFG_PKTCNTSEL; 1284 1285 reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0); 1286 reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num); 1287 1288 reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0); 1289 reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst); 1290 1291 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg); 1292 } 1293 } else { 1294 if (rx_thr_num && rx_maxburst) { 1295 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1296 reg |= DWC31_GRXTHRCFG_PKTCNTSEL; 1297 1298 reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0); 1299 reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num); 1300 1301 reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0); 1302 reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst); 1303 1304 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1305 } 1306 1307 if (tx_thr_num && tx_maxburst) { 1308 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG); 1309 reg |= DWC31_GTXTHRCFG_PKTCNTSEL; 1310 1311 reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0); 1312 reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num); 1313 1314 reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0); 1315 reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst); 1316 1317 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg); 1318 } 1319 } 1320 } 1321 1322 /** 1323 * dwc3_core_init - Low-level initialization of DWC3 Core 1324 * @dwc: Pointer to our controller context structure 1325 * 1326 * Returns 0 on success otherwise negative errno. 1327 */ 1328 static int dwc3_core_init(struct dwc3 *dwc) 1329 { 1330 unsigned int hw_mode; 1331 u32 reg; 1332 int ret; 1333 1334 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); 1335 1336 /* 1337 * Write Linux Version Code to our GUID register so it's easy to figure 1338 * out which kernel version a bug was found. 
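	 *
	 * For example, on a v6.6.0 kernel LINUX_VERSION_CODE is
	 * KERNEL_VERSION(6, 6, 0) = (6 << 16) | (6 << 8) | 0 = 0x060600,
	 * which is the value a debugger would read back from the GUID
	 * register.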
1339 */ 1340 dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); 1341 1342 ret = dwc3_phy_setup(dwc); 1343 if (ret) 1344 return ret; 1345 1346 if (!dwc->ulpi_ready) { 1347 ret = dwc3_core_ulpi_init(dwc); 1348 if (ret) { 1349 if (ret == -ETIMEDOUT) { 1350 dwc3_core_soft_reset(dwc); 1351 ret = -EPROBE_DEFER; 1352 } 1353 return ret; 1354 } 1355 dwc->ulpi_ready = true; 1356 } 1357 1358 if (!dwc->phys_ready) { 1359 ret = dwc3_core_get_phy(dwc); 1360 if (ret) 1361 goto err_exit_ulpi; 1362 dwc->phys_ready = true; 1363 } 1364 1365 ret = dwc3_phy_init(dwc); 1366 if (ret) 1367 goto err_exit_ulpi; 1368 1369 ret = dwc3_core_soft_reset(dwc); 1370 if (ret) 1371 goto err_exit_phy; 1372 1373 dwc3_core_setup_global_control(dwc); 1374 dwc3_core_num_eps(dwc); 1375 1376 /* Set power down scale of suspend_clk */ 1377 dwc3_set_power_down_clk_scale(dwc); 1378 1379 /* Adjust Frame Length */ 1380 dwc3_frame_length_adjustment(dwc); 1381 1382 /* Adjust Reference Clock Period */ 1383 dwc3_ref_clk_period(dwc); 1384 1385 dwc3_set_incr_burst_type(dwc); 1386 1387 dwc3_config_soc_bus(dwc); 1388 1389 ret = dwc3_phy_power_on(dwc); 1390 if (ret) 1391 goto err_exit_phy; 1392 1393 ret = dwc3_event_buffers_setup(dwc); 1394 if (ret) { 1395 dev_err(dwc->dev, "failed to setup event buffers\n"); 1396 goto err_power_off_phy; 1397 } 1398 1399 /* 1400 * ENDXFER polling is available on version 3.10a and later of 1401 * the DWC_usb3 controller. It is NOT available in the 1402 * DWC_usb31 controller. 1403 */ 1404 if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) { 1405 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); 1406 reg |= DWC3_GUCTL2_RST_ACTBITLATER; 1407 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); 1408 } 1409 1410 /* 1411 * STAR 9001285599: This issue affects DWC_usb3 version 3.20a 1412 * only. If the PM TIMER ECM is enabled through GUCTL2[19], the 1413 * link compliance test (TD7.21) may fail. If the ECN is not 1414 * enabled (GUCTL2[19] = 0), the controller will use the old timer 1415 * value (5us), which is still acceptable for the link compliance 1416 * test. Therefore, do not enable PM TIMER ECM in 3.20a by 1417 * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0. 1418 */ 1419 if (DWC3_VER_IS(DWC3, 320A)) { 1420 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); 1421 reg &= ~DWC3_GUCTL2_LC_TIMER; 1422 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); 1423 } 1424 1425 /* 1426 * When configured in HOST mode, after issuing U3/L2 exit controller 1427 * fails to send proper CRC checksum in CRC5 field. Because of this 1428 * behaviour Transaction Error is generated, resulting in reset and 1429 * re-enumeration of usb device attached. All the termsel, xcvrsel, 1430 * opmode becomes 0 during end of resume. Enabling bit 10 of GUCTL1 1431 * will correct this problem. This option is to support certain 1432 * legacy ULPI PHYs. 1433 */ 1434 if (dwc->resume_hs_terminations) { 1435 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1); 1436 reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST; 1437 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); 1438 } 1439 1440 if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) { 1441 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1); 1442 1443 /* 1444 * Enable hardware control of sending remote wakeup 1445 * in HS when the device is in the L1 state. 1446 */ 1447 if (!DWC3_VER_IS_PRIOR(DWC3, 290A)) 1448 reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW; 1449 1450 /* 1451 * Decouple USB 2.0 L1 & L2 events which will allow for 1452 * gadget driver to only receive U3/L2 suspend & wakeup 1453 * events and prevent the more frequent L1 LPM transitions 1454 * from interrupting the driver. 
1455 */ 1456 if (!DWC3_VER_IS_PRIOR(DWC3, 300A)) 1457 reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT; 1458 1459 if (dwc->dis_tx_ipgap_linecheck_quirk) 1460 reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; 1461 1462 if (dwc->parkmode_disable_ss_quirk) 1463 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; 1464 1465 if (dwc->parkmode_disable_hs_quirk) 1466 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS; 1467 1468 if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY)) { 1469 if (dwc->maximum_speed == USB_SPEED_FULL || 1470 dwc->maximum_speed == USB_SPEED_HIGH) 1471 reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK; 1472 else 1473 reg &= ~DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK; 1474 } 1475 1476 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); 1477 } 1478 1479 dwc3_config_threshold(dwc); 1480 1481 /* 1482 * Modify this for all supported Super Speed ports when 1483 * multiport support is added. 1484 */ 1485 if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && 1486 (DWC3_IP_IS(DWC31)) && 1487 dwc->maximum_speed == USB_SPEED_SUPER) { 1488 int i; 1489 1490 for (i = 0; i < dwc->num_usb3_ports; i++) { 1491 reg = dwc3_readl(dwc->regs, DWC3_LLUCTL(i)); 1492 reg |= DWC3_LLUCTL_FORCE_GEN1; 1493 dwc3_writel(dwc->regs, DWC3_LLUCTL(i), reg); 1494 } 1495 } 1496 1497 /* 1498 * STAR 9001346572: This issue affects DWC_usb31 versions 1.80a and 1499 * prior. When an active endpoint not currently cached in the host 1500 * controller is chosen to be cached to the same index as an endpoint 1501 * receiving NAKs, the endpoint receiving NAKs enters continuous 1502 * retry mode. This prevents it from being evicted from the host 1503 * controller cache, blocking the new endpoint from being cached and 1504 * serviced. 1505 * 1506 * To resolve this, for controller versions 1.70a and 1.80a, set the 1507 * GUCTL3 bit[16] (USB2.0 Internal Retry Disable) to 1. This bit 1508 * disables the USB2.0 internal retry feature. The GUCTL3[16] register 1509 * function is available only from version 1.70a. 
1510 */ 1511 if (DWC3_VER_IS_WITHIN(DWC31, 170A, 180A)) { 1512 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); 1513 reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE; 1514 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); 1515 } 1516 1517 return 0; 1518 1519 err_power_off_phy: 1520 dwc3_phy_power_off(dwc); 1521 err_exit_phy: 1522 dwc3_phy_exit(dwc); 1523 err_exit_ulpi: 1524 dwc3_ulpi_exit(dwc); 1525 1526 return ret; 1527 } 1528 1529 static int dwc3_core_get_phy(struct dwc3 *dwc) 1530 { 1531 struct device *dev = dwc->dev; 1532 struct device_node *node = dev->of_node; 1533 char phy_name[9]; 1534 int ret; 1535 u8 i; 1536 1537 if (node) { 1538 dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0); 1539 dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1); 1540 } else { 1541 dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); 1542 dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3); 1543 } 1544 1545 if (IS_ERR(dwc->usb2_phy)) { 1546 ret = PTR_ERR(dwc->usb2_phy); 1547 if (ret == -ENXIO || ret == -ENODEV) 1548 dwc->usb2_phy = NULL; 1549 else 1550 return dev_err_probe(dev, ret, "no usb2 phy configured\n"); 1551 } 1552 1553 if (IS_ERR(dwc->usb3_phy)) { 1554 ret = PTR_ERR(dwc->usb3_phy); 1555 if (ret == -ENXIO || ret == -ENODEV) 1556 dwc->usb3_phy = NULL; 1557 else 1558 return dev_err_probe(dev, ret, "no usb3 phy configured\n"); 1559 } 1560 1561 for (i = 0; i < dwc->num_usb2_ports; i++) { 1562 if (dwc->num_usb2_ports == 1) 1563 snprintf(phy_name, sizeof(phy_name), "usb2-phy"); 1564 else 1565 snprintf(phy_name, sizeof(phy_name), "usb2-%u", i); 1566 1567 dwc->usb2_generic_phy[i] = devm_phy_get(dev, phy_name); 1568 if (IS_ERR(dwc->usb2_generic_phy[i])) { 1569 ret = PTR_ERR(dwc->usb2_generic_phy[i]); 1570 if (ret == -ENOSYS || ret == -ENODEV) 1571 dwc->usb2_generic_phy[i] = NULL; 1572 else 1573 return dev_err_probe(dev, ret, "failed to lookup phy %s\n", 1574 phy_name); 1575 } 1576 } 1577 1578 for (i = 0; i < dwc->num_usb3_ports; i++) { 1579 if (dwc->num_usb3_ports == 1) 1580 snprintf(phy_name, sizeof(phy_name), "usb3-phy"); 1581 else 1582 snprintf(phy_name, sizeof(phy_name), "usb3-%u", i); 1583 1584 dwc->usb3_generic_phy[i] = devm_phy_get(dev, phy_name); 1585 if (IS_ERR(dwc->usb3_generic_phy[i])) { 1586 ret = PTR_ERR(dwc->usb3_generic_phy[i]); 1587 if (ret == -ENOSYS || ret == -ENODEV) 1588 dwc->usb3_generic_phy[i] = NULL; 1589 else 1590 return dev_err_probe(dev, ret, "failed to lookup phy %s\n", 1591 phy_name); 1592 } 1593 } 1594 1595 return 0; 1596 } 1597 1598 static int dwc3_core_init_mode(struct dwc3 *dwc) 1599 { 1600 struct device *dev = dwc->dev; 1601 int ret; 1602 int i; 1603 1604 switch (dwc->dr_mode) { 1605 case USB_DR_MODE_PERIPHERAL: 1606 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false); 1607 1608 if (dwc->usb2_phy) 1609 otg_set_vbus(dwc->usb2_phy->otg, false); 1610 phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE); 1611 phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE); 1612 1613 ret = dwc3_gadget_init(dwc); 1614 if (ret) 1615 return dev_err_probe(dev, ret, "failed to initialize gadget\n"); 1616 break; 1617 case USB_DR_MODE_HOST: 1618 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false); 1619 1620 if (dwc->usb2_phy) 1621 otg_set_vbus(dwc->usb2_phy->otg, true); 1622 for (i = 0; i < dwc->num_usb2_ports; i++) 1623 phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST); 1624 for (i = 0; i < dwc->num_usb3_ports; i++) 1625 phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST); 1626 1627 ret = dwc3_host_init(dwc); 1628 if (ret) 1629 return dev_err_probe(dev, ret, 
"failed to initialize host\n"); 1630 break; 1631 case USB_DR_MODE_OTG: 1632 INIT_WORK(&dwc->drd_work, __dwc3_set_mode); 1633 ret = dwc3_drd_init(dwc); 1634 if (ret) 1635 return dev_err_probe(dev, ret, "failed to initialize dual-role\n"); 1636 break; 1637 default: 1638 dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode); 1639 return -EINVAL; 1640 } 1641 1642 return 0; 1643 } 1644 1645 static void dwc3_core_exit_mode(struct dwc3 *dwc) 1646 { 1647 switch (dwc->dr_mode) { 1648 case USB_DR_MODE_PERIPHERAL: 1649 dwc3_gadget_exit(dwc); 1650 break; 1651 case USB_DR_MODE_HOST: 1652 dwc3_host_exit(dwc); 1653 break; 1654 case USB_DR_MODE_OTG: 1655 dwc3_drd_exit(dwc); 1656 break; 1657 default: 1658 /* do nothing */ 1659 break; 1660 } 1661 1662 /* de-assert DRVVBUS for HOST and OTG mode */ 1663 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); 1664 } 1665 1666 static void dwc3_get_software_properties(struct dwc3 *dwc) 1667 { 1668 struct device *tmpdev; 1669 u16 gsbuscfg0_reqinfo; 1670 int ret; 1671 1672 dwc->gsbuscfg0_reqinfo = DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED; 1673 1674 /* 1675 * Iterate over all parent nodes for finding swnode properties 1676 * and non-DT (non-ABI) properties. 1677 */ 1678 for (tmpdev = dwc->dev; tmpdev; tmpdev = tmpdev->parent) { 1679 ret = device_property_read_u16(tmpdev, 1680 "snps,gsbuscfg0-reqinfo", 1681 &gsbuscfg0_reqinfo); 1682 if (!ret) 1683 dwc->gsbuscfg0_reqinfo = gsbuscfg0_reqinfo; 1684 } 1685 } 1686 1687 static void dwc3_get_properties(struct dwc3 *dwc) 1688 { 1689 struct device *dev = dwc->dev; 1690 u8 lpm_nyet_threshold; 1691 u8 tx_de_emphasis; 1692 u8 hird_threshold; 1693 u8 rx_thr_num_pkt = 0; 1694 u8 rx_max_burst = 0; 1695 u8 tx_thr_num_pkt = 0; 1696 u8 tx_max_burst = 0; 1697 u8 rx_thr_num_pkt_prd = 0; 1698 u8 rx_max_burst_prd = 0; 1699 u8 tx_thr_num_pkt_prd = 0; 1700 u8 tx_max_burst_prd = 0; 1701 u8 tx_fifo_resize_max_num; 1702 u16 num_hc_interrupters; 1703 1704 /* default to highest possible threshold */ 1705 lpm_nyet_threshold = 0xf; 1706 1707 /* default to -3.5dB de-emphasis */ 1708 tx_de_emphasis = 1; 1709 1710 /* 1711 * default to assert utmi_sleep_n and use maximum allowed HIRD 1712 * threshold value of 0b1100 1713 */ 1714 hird_threshold = 12; 1715 1716 /* 1717 * default to a TXFIFO size large enough to fit 6 max packets. This 1718 * allows for systems with larger bus latencies to have some headroom 1719 * for endpoints that have a large bMaxBurst value. 
1720 */ 1721 tx_fifo_resize_max_num = 6; 1722 1723 /* default to a single XHCI interrupter */ 1724 num_hc_interrupters = 1; 1725 1726 dwc->maximum_speed = usb_get_maximum_speed(dev); 1727 dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev); 1728 dwc->dr_mode = usb_get_dr_mode(dev); 1729 dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); 1730 1731 dwc->sysdev_is_parent = device_property_read_bool(dev, 1732 "linux,sysdev_is_parent"); 1733 if (dwc->sysdev_is_parent) 1734 dwc->sysdev = dwc->dev->parent; 1735 else 1736 dwc->sysdev = dwc->dev; 1737 1738 dwc->sys_wakeup = device_may_wakeup(dwc->sysdev); 1739 1740 dwc->has_lpm_erratum = device_property_read_bool(dev, 1741 "snps,has-lpm-erratum"); 1742 device_property_read_u8(dev, "snps,lpm-nyet-threshold", 1743 &lpm_nyet_threshold); 1744 dwc->is_utmi_l1_suspend = device_property_read_bool(dev, 1745 "snps,is-utmi-l1-suspend"); 1746 device_property_read_u8(dev, "snps,hird-threshold", 1747 &hird_threshold); 1748 dwc->dis_start_transfer_quirk = device_property_read_bool(dev, 1749 "snps,dis-start-transfer-quirk"); 1750 dwc->usb3_lpm_capable = device_property_read_bool(dev, 1751 "snps,usb3_lpm_capable"); 1752 dwc->usb2_lpm_disable = device_property_read_bool(dev, 1753 "snps,usb2-lpm-disable"); 1754 dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev, 1755 "snps,usb2-gadget-lpm-disable"); 1756 device_property_read_u8(dev, "snps,rx-thr-num-pkt", 1757 &rx_thr_num_pkt); 1758 device_property_read_u8(dev, "snps,rx-max-burst", 1759 &rx_max_burst); 1760 device_property_read_u8(dev, "snps,tx-thr-num-pkt", 1761 &tx_thr_num_pkt); 1762 device_property_read_u8(dev, "snps,tx-max-burst", 1763 &tx_max_burst); 1764 device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd", 1765 &rx_thr_num_pkt_prd); 1766 device_property_read_u8(dev, "snps,rx-max-burst-prd", 1767 &rx_max_burst_prd); 1768 device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd", 1769 &tx_thr_num_pkt_prd); 1770 device_property_read_u8(dev, "snps,tx-max-burst-prd", 1771 &tx_max_burst_prd); 1772 device_property_read_u16(dev, "num-hc-interrupters", 1773 &num_hc_interrupters); 1774 /* DWC3 core allowed to have a max of 8 interrupters */ 1775 if (num_hc_interrupters > 8) 1776 num_hc_interrupters = 8; 1777 1778 dwc->do_fifo_resize = device_property_read_bool(dev, 1779 "tx-fifo-resize"); 1780 if (dwc->do_fifo_resize) 1781 device_property_read_u8(dev, "tx-fifo-max-num", 1782 &tx_fifo_resize_max_num); 1783 1784 dwc->disable_scramble_quirk = device_property_read_bool(dev, 1785 "snps,disable_scramble_quirk"); 1786 dwc->u2exit_lfps_quirk = device_property_read_bool(dev, 1787 "snps,u2exit_lfps_quirk"); 1788 dwc->u2ss_inp3_quirk = device_property_read_bool(dev, 1789 "snps,u2ss_inp3_quirk"); 1790 dwc->req_p1p2p3_quirk = device_property_read_bool(dev, 1791 "snps,req_p1p2p3_quirk"); 1792 dwc->del_p1p2p3_quirk = device_property_read_bool(dev, 1793 "snps,del_p1p2p3_quirk"); 1794 dwc->del_phy_power_chg_quirk = device_property_read_bool(dev, 1795 "snps,del_phy_power_chg_quirk"); 1796 dwc->lfps_filter_quirk = device_property_read_bool(dev, 1797 "snps,lfps_filter_quirk"); 1798 dwc->rx_detect_poll_quirk = device_property_read_bool(dev, 1799 "snps,rx_detect_poll_quirk"); 1800 dwc->dis_u3_susphy_quirk = device_property_read_bool(dev, 1801 "snps,dis_u3_susphy_quirk"); 1802 dwc->dis_u2_susphy_quirk = device_property_read_bool(dev, 1803 "snps,dis_u2_susphy_quirk"); 1804 dwc->dis_enblslpm_quirk = device_property_read_bool(dev, 1805 "snps,dis_enblslpm_quirk"); 1806 dwc->dis_u1_entry_quirk = device_property_read_bool(dev, 1807 
"snps,dis-u1-entry-quirk"); 1808 dwc->dis_u2_entry_quirk = device_property_read_bool(dev, 1809 "snps,dis-u2-entry-quirk"); 1810 dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev, 1811 "snps,dis_rxdet_inp3_quirk"); 1812 dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev, 1813 "snps,dis-u2-freeclk-exists-quirk"); 1814 dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev, 1815 "snps,dis-del-phy-power-chg-quirk"); 1816 dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, 1817 "snps,dis-tx-ipgap-linecheck-quirk"); 1818 dwc->resume_hs_terminations = device_property_read_bool(dev, 1819 "snps,resume-hs-terminations"); 1820 dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev, 1821 "snps,ulpi-ext-vbus-drv"); 1822 dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, 1823 "snps,parkmode-disable-ss-quirk"); 1824 dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev, 1825 "snps,parkmode-disable-hs-quirk"); 1826 dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev, 1827 "snps,gfladj-refclk-lpm-sel-quirk"); 1828 1829 dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, 1830 "snps,tx_de_emphasis_quirk"); 1831 device_property_read_u8(dev, "snps,tx_de_emphasis", 1832 &tx_de_emphasis); 1833 device_property_read_string(dev, "snps,hsphy_interface", 1834 &dwc->hsphy_interface); 1835 device_property_read_u32(dev, "snps,quirk-frame-length-adjustment", 1836 &dwc->fladj); 1837 device_property_read_u32(dev, "snps,ref-clock-period-ns", 1838 &dwc->ref_clk_per); 1839 1840 dwc->dis_metastability_quirk = device_property_read_bool(dev, 1841 "snps,dis_metastability_quirk"); 1842 1843 dwc->dis_split_quirk = device_property_read_bool(dev, 1844 "snps,dis-split-quirk"); 1845 1846 dwc->lpm_nyet_threshold = lpm_nyet_threshold; 1847 dwc->tx_de_emphasis = tx_de_emphasis; 1848 1849 dwc->hird_threshold = hird_threshold; 1850 1851 dwc->rx_thr_num_pkt = rx_thr_num_pkt; 1852 dwc->rx_max_burst = rx_max_burst; 1853 1854 dwc->tx_thr_num_pkt = tx_thr_num_pkt; 1855 dwc->tx_max_burst = tx_max_burst; 1856 1857 dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd; 1858 dwc->rx_max_burst_prd = rx_max_burst_prd; 1859 1860 dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd; 1861 dwc->tx_max_burst_prd = tx_max_burst_prd; 1862 1863 dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num; 1864 1865 dwc->num_hc_interrupters = num_hc_interrupters; 1866 } 1867 1868 /* check whether the core supports IMOD */ 1869 bool dwc3_has_imod(struct dwc3 *dwc) 1870 { 1871 return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) || 1872 DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) || 1873 DWC3_IP_IS(DWC32); 1874 } 1875 1876 static void dwc3_check_params(struct dwc3 *dwc) 1877 { 1878 struct device *dev = dwc->dev; 1879 unsigned int hwparam_gen = 1880 DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); 1881 1882 /* 1883 * Enable IMOD for all supporting controllers. 1884 * 1885 * Particularly, DWC_usb3 v3.00a must enable this feature for 1886 * the following reason: 1887 * 1888 * Workaround for STAR 9000961433 which affects only version 1889 * 3.00a of the DWC_usb3 core. This prevents the controller 1890 * interrupt from being masked while handling events. IMOD 1891 * allows us to work around this issue. Enable it for the 1892 * affected version. 
1893 */ 1894 if (dwc3_has_imod((dwc))) 1895 dwc->imod_interval = 1; 1896 1897 /* Check the maximum_speed parameter */ 1898 switch (dwc->maximum_speed) { 1899 case USB_SPEED_FULL: 1900 case USB_SPEED_HIGH: 1901 break; 1902 case USB_SPEED_SUPER: 1903 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) 1904 dev_warn(dev, "UDC doesn't support Gen 1\n"); 1905 break; 1906 case USB_SPEED_SUPER_PLUS: 1907 if ((DWC3_IP_IS(DWC32) && 1908 hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) || 1909 (!DWC3_IP_IS(DWC32) && 1910 hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) 1911 dev_warn(dev, "UDC doesn't support SSP\n"); 1912 break; 1913 default: 1914 dev_err(dev, "invalid maximum_speed parameter %d\n", 1915 dwc->maximum_speed); 1916 fallthrough; 1917 case USB_SPEED_UNKNOWN: 1918 switch (hwparam_gen) { 1919 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: 1920 dwc->maximum_speed = USB_SPEED_SUPER_PLUS; 1921 break; 1922 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1: 1923 if (DWC3_IP_IS(DWC32)) 1924 dwc->maximum_speed = USB_SPEED_SUPER_PLUS; 1925 else 1926 dwc->maximum_speed = USB_SPEED_SUPER; 1927 break; 1928 case DWC3_GHWPARAMS3_SSPHY_IFC_DIS: 1929 dwc->maximum_speed = USB_SPEED_HIGH; 1930 break; 1931 default: 1932 dwc->maximum_speed = USB_SPEED_SUPER; 1933 break; 1934 } 1935 break; 1936 } 1937 1938 /* 1939 * Currently the controller does not have visibility into the HW 1940 * parameter to determine the maximum number of lanes the HW supports. 1941 * If the number of lanes is not specified in the device property, then 1942 * set the default to support dual-lane for DWC_usb32 and single-lane 1943 * for DWC_usb31 for super-speed-plus. 1944 */ 1945 if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) { 1946 switch (dwc->max_ssp_rate) { 1947 case USB_SSP_GEN_2x1: 1948 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1) 1949 dev_warn(dev, "UDC only supports Gen 1\n"); 1950 break; 1951 case USB_SSP_GEN_1x2: 1952 case USB_SSP_GEN_2x2: 1953 if (DWC3_IP_IS(DWC31)) 1954 dev_warn(dev, "UDC only supports single lane\n"); 1955 break; 1956 case USB_SSP_GEN_UNKNOWN: 1957 default: 1958 switch (hwparam_gen) { 1959 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: 1960 if (DWC3_IP_IS(DWC32)) 1961 dwc->max_ssp_rate = USB_SSP_GEN_2x2; 1962 else 1963 dwc->max_ssp_rate = USB_SSP_GEN_2x1; 1964 break; 1965 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1: 1966 if (DWC3_IP_IS(DWC32)) 1967 dwc->max_ssp_rate = USB_SSP_GEN_1x2; 1968 break; 1969 } 1970 break; 1971 } 1972 } 1973 } 1974 1975 static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) 1976 { 1977 struct device *dev = dwc->dev; 1978 struct device_node *np_phy; 1979 struct extcon_dev *edev = NULL; 1980 const char *name; 1981 1982 if (device_property_present(dev, "extcon")) 1983 return extcon_get_edev_by_phandle(dev, 0); 1984 1985 /* 1986 * Device tree platforms should get extcon via phandle. 1987 * On ACPI platforms, we get the name from a device property. 1988 * This device property is for kernel internal use only and 1989 * is expected to be set by the glue code. 1990 */ 1991 if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) 1992 return extcon_get_extcon_dev(name); 1993 1994 /* 1995 * Check explicitly if "usb-role-switch" is used since 1996 * extcon_find_edev_by_node() can not be used to check the absence of 1997 * an extcon device. In the absence of an device it will always return 1998 * EPROBE_DEFER. 
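	 *
	 * For example (illustrative), such boards simply carry
	 *
	 *   usb-role-switch;
	 *   role-switch-default-mode = "host";
	 *
	 * in the controller node, and no extcon lookup is performed.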
static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *np_phy;
	struct extcon_dev *edev = NULL;
	const char *name;

	if (device_property_present(dev, "extcon"))
		return extcon_get_edev_by_phandle(dev, 0);

	/*
	 * Device tree platforms should get extcon via phandle.
	 * On ACPI platforms, we get the name from a device property.
	 * This device property is for kernel internal use only and
	 * is expected to be set by the glue code.
	 */
	if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
		return extcon_get_extcon_dev(name);

	/*
	 * Check explicitly if "usb-role-switch" is used since
	 * extcon_find_edev_by_node() cannot be used to check the absence of
	 * an extcon device. In the absence of a device it will always return
	 * -EPROBE_DEFER.
	 */
	if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
	    device_property_read_bool(dev, "usb-role-switch"))
		return NULL;

	/*
	 * Try to get an extcon device from the USB PHY controller's "port"
	 * node. Check if it has the "port" node first, to avoid printing the
	 * error message from underlying code, as it's a valid case: extcon
	 * device (and "port" node) may be missing in case of "usb-role-switch"
	 * or OTG mode.
	 */
	np_phy = of_parse_phandle(dev->of_node, "phys", 0);
	if (of_graph_is_present(np_phy)) {
		struct device_node *np_conn;

		np_conn = of_graph_get_remote_node(np_phy, -1, -1);
		if (np_conn)
			edev = extcon_find_edev_by_node(np_conn);
		of_node_put(np_conn);
	}
	of_node_put(np_phy);

	return edev;
}

static int dwc3_get_clocks(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;

	if (!dev->of_node)
		return 0;

	/*
	 * Clocks are optional, but new DT platforms should support all clocks
	 * as required by the DT-binding.
	 * Some devices have different clock names in legacy device trees,
	 * check for them to retain backwards compatibility.
	 */
	dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
	if (IS_ERR(dwc->bus_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
				     "could not get bus clock\n");
	}

	if (dwc->bus_clk == NULL) {
		dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
		if (IS_ERR(dwc->bus_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
					     "could not get bus clock\n");
		}
	}

	dwc->ref_clk = devm_clk_get_optional(dev, "ref");
	if (IS_ERR(dwc->ref_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
				     "could not get ref clock\n");
	}

	if (dwc->ref_clk == NULL) {
		dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
		if (IS_ERR(dwc->ref_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
					     "could not get ref clock\n");
		}
	}

	dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
	if (IS_ERR(dwc->susp_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
				     "could not get suspend clock\n");
	}

	if (dwc->susp_clk == NULL) {
		dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
		if (IS_ERR(dwc->susp_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
					     "could not get suspend clock\n");
		}
	}

	/* specific to Rockchip RK3588 */
	dwc->utmi_clk = devm_clk_get_optional(dev, "utmi");
	if (IS_ERR(dwc->utmi_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->utmi_clk),
				     "could not get utmi clock\n");
	}

	/* specific to Rockchip RK3588 */
	dwc->pipe_clk = devm_clk_get_optional(dev, "pipe");
	if (IS_ERR(dwc->pipe_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->pipe_clk),
				     "could not get pipe clock\n");
	}

	return 0;
}

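/**
 * dwc3_get_num_ports - count the root-hub ports of a host-only controller
 * @dwc: pointer to our context structure
 *
 * Walk the xHCI "supported protocol" extended capabilities to find out how
 * many USB2 and USB3 ports the controller exposes.
 *
 * Return: 0 on success, a negative error code otherwise.
 */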
static int dwc3_get_num_ports(struct dwc3 *dwc)
{
	void __iomem *base;
	u8 major_revision;
	u32 offset;
	u32 val;

	/*
	 * Remap the xHCI address space to access the xHCI extended capability
	 * registers, which are needed to determine the number of ports
	 * present.
	 */
	base = ioremap(dwc->xhci_resources[0].start,
		       resource_size(&dwc->xhci_resources[0]));
	if (!base)
		return -ENOMEM;

	offset = 0;
	do {
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
		if (!offset)
			break;

		val = readl(base + offset);
		major_revision = XHCI_EXT_PORT_MAJOR(val);

		val = readl(base + offset + 0x08);
		if (major_revision == 0x03) {
			dwc->num_usb3_ports += XHCI_EXT_PORT_COUNT(val);
		} else if (major_revision <= 0x02) {
			dwc->num_usb2_ports += XHCI_EXT_PORT_COUNT(val);
		} else {
			dev_warn(dwc->dev, "unrecognized port major revision %d\n",
				 major_revision);
		}
	} while (1);

	dev_dbg(dwc->dev, "hs-ports: %u ss-ports: %u\n",
		dwc->num_usb2_ports, dwc->num_usb3_ports);

	iounmap(base);

	if (dwc->num_usb2_ports > DWC3_USB2_MAX_PORTS ||
	    dwc->num_usb3_ports > DWC3_USB3_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
{
	struct power_supply *usb_psy;
	const char *usb_psy_name;
	int ret;

	ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
	if (ret < 0)
		return NULL;

	usb_psy = power_supply_get_by_name(usb_psy_name);
	if (!usb_psy)
		return ERR_PTR(-EPROBE_DEFER);

	return usb_psy;
}

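/**
 * dwc3_probe - bind the core driver to a dwc3 platform device
 * @pdev: the platform device being probed
 *
 * Map the global registers, acquire the clocks, resets and optional USB
 * power supply, initialize the core and hand control over to the mode
 * specific (host, peripheral or DRD) code.
 *
 * Return: 0 on success, a negative error code otherwise.
 */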
static int dwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res, dwc_res;
	unsigned int hw_mode;
	void __iomem *regs;
	struct dwc3 *dwc;
	int ret;

	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	/*
	 * Request the memory region but exclude the xHCI registers, since
	 * those will be requested by the xhci-plat driver.
	 */
	dwc_res = *res;
	dwc_res.start += DWC3_GLOBALS_REGS_START;

	if (dev->of_node) {
		struct device_node *parent = of_get_parent(dev->of_node);

		if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
			dwc_res.start -= DWC3_GLOBALS_REGS_START;
			dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
		}

		of_node_put(parent);
	}

	regs = devm_ioremap_resource(dev, &dwc_res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dwc->regs = regs;
	dwc->regs_size = resource_size(&dwc_res);

	dwc3_get_properties(dwc);

	dwc3_get_software_properties(dwc);

	dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
	if (IS_ERR(dwc->usb_psy))
		return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");

	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
	if (IS_ERR(dwc->reset)) {
		ret = PTR_ERR(dwc->reset);
		goto err_put_psy;
	}

	ret = dwc3_get_clocks(dwc);
	if (ret)
		goto err_put_psy;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		goto err_put_psy;

	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto err_assert_reset;

	if (!dwc3_core_is_valid(dwc)) {
		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
		ret = -ENODEV;
		goto err_disable_clks;
	}

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	if (!dwc->sysdev_is_parent &&
	    DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
		if (ret)
			goto err_disable_clks;
	}

	/*
	 * Currently only DWC3 controllers that are host-only capable
	 * can have more than one port.
	 */
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
		ret = dwc3_get_num_ports(dwc);
		if (ret)
			goto err_disable_clks;
	} else {
		dwc->num_usb2_ports = 1;
		dwc->num_usb3_ports = 1;
	}

	spin_lock_init(&dwc->lock);
	mutex_init(&dwc->mutex);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);

	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err_allow_rpm;
	}

	dwc->edev = dwc3_get_extcon(dwc);
	if (IS_ERR(dwc->edev)) {
		ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
		goto err_free_event_buffers;
	}

	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err_free_event_buffers;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err_probe(dev, ret, "failed to initialize core\n");
		goto err_free_event_buffers;
	}

	dwc3_check_params(dwc);
	dwc3_debugfs_init(dwc);

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err_exit_debugfs;

	pm_runtime_put(dev);

	dma_set_max_seg_size(dev, UINT_MAX);

	return 0;

err_exit_debugfs:
	dwc3_debugfs_exit(dwc);
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_ulpi_exit(dwc);
err_free_event_buffers:
	dwc3_free_event_buffers(dwc);
err_allow_rpm:
	pm_runtime_allow(dev);
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_put_noidle(dev);
err_disable_clks:
	dwc3_clk_disable(dwc);
err_assert_reset:
	reset_control_assert(dwc->reset);
err_put_psy:
	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);

	return ret;
}

static void dwc3_remove(struct platform_device *pdev)
{
	struct dwc3 *dwc = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);

	dwc3_core_exit_mode(dwc);
	dwc3_debugfs_exit(dwc);

	dwc3_core_exit(dwc);
	dwc3_ulpi_exit(dwc);

	pm_runtime_allow(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	/*
	 * HACK: Clear the driver data, which is currently accessed by parent
	 * glue drivers, before allowing the parent to suspend.
	 */
	platform_set_drvdata(pdev, NULL);
	pm_runtime_set_suspended(&pdev->dev);

	dwc3_free_event_buffers(dwc);

	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);
}

#ifdef CONFIG_PM
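/**
 * dwc3_core_init_for_resume - bring the core back up on resume
 * @dwc: pointer to our context structure
 *
 * Deassert the reset, re-enable the clocks and rerun the core
 * initialization for resume paths that previously called dwc3_core_exit().
 *
 * Return: 0 on success, a negative error code otherwise.
 */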
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
	int ret;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		return ret;

	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto assert_reset;

	ret = dwc3_core_init(dwc);
	if (ret)
		goto disable_clks;

	return 0;

disable_clks:
	dwc3_clk_disable(dwc);
assert_reset:
	reset_control_assert(dwc->reset);

	return ret;
}

static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
	u32 reg;
	int i;

	if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) {
		dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
				    DWC3_GUSB2PHYCFG_SUSPHY) ||
				    (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
				    DWC3_GUSB3PIPECTL_SUSPHY);
		/*
		 * The TI AM62 platform requires SUSPHY to be
		 * enabled for system suspend to work.
		 */
		if (!dwc->susphy_state)
			dwc3_enable_susphy(dwc, true);
	}

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (pm_runtime_suspended(dwc->dev))
			break;
		dwc3_gadget_suspend(dwc);
		synchronize_irq(dwc->irq_gadget);
		dwc3_core_exit(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
			dwc3_core_exit(dwc);
			break;
		}

		/* Let the controller suspend the HSPHY before the PHY driver suspends */
		if (dwc->dis_u2_susphy_quirk ||
		    dwc->dis_enblslpm_quirk) {
			for (i = 0; i < dwc->num_usb2_ports; i++) {
				reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
				reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
					DWC3_GUSB2PHYCFG_SUSPHY;
				dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
			}

			/* Give the USB2 PHY some time to suspend */
			usleep_range(5000, 6000);
		}

		for (i = 0; i < dwc->num_usb2_ports; i++)
			phy_pm_runtime_put_sync(dwc->usb2_generic_phy[i]);
		for (i = 0; i < dwc->num_usb3_ports; i++)
			phy_pm_runtime_put_sync(dwc->usb3_generic_phy[i]);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* do nothing during runtime_suspend */
		if (PMSG_IS_AUTO(msg))
			break;

		if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			dwc3_gadget_suspend(dwc);
			synchronize_irq(dwc->irq_gadget);
		}

		dwc3_otg_exit(dwc);
		dwc3_core_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
	int ret;
	u32 reg;
	int i;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;

		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
		dwc3_gadget_resume(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
			ret = dwc3_core_init_for_resume(dwc);
			if (ret)
				return ret;
			dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true);
			break;
		}
		/* Restore GUSB2PHYCFG bits that were modified in suspend */
		for (i = 0; i < dwc->num_usb2_ports; i++) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
			if (dwc->dis_u2_susphy_quirk)
				reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

			if (dwc->dis_enblslpm_quirk)
				reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;

			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
		}

		for (i = 0; i < dwc->num_usb2_ports; i++)
			phy_pm_runtime_get_sync(dwc->usb2_generic_phy[i]);
		for (i = 0; i < dwc->num_usb3_ports; i++)
			phy_pm_runtime_get_sync(dwc->usb3_generic_phy[i]);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* nothing to do on runtime_resume */
		if (PMSG_IS_AUTO(msg))
			break;

		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;

		dwc3_set_prtcap(dwc, dwc->current_dr_role, true);

		dwc3_otg_init(dwc);
		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
			dwc3_otg_host_init(dwc);
		} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			dwc3_gadget_resume(dwc);
		}

		break;
	default:
		/* do nothing */
		break;
	}

	if (!PMSG_IS_AUTO(msg)) {
		/* Restore the SUSPHY state to what it was before system suspend. */
		dwc3_enable_susphy(dwc, dwc->susphy_state);
	}

	return 0;
}

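/**
 * dwc3_runtime_checks - check whether runtime suspend is allowed
 * @dwc: pointer to our context structure
 *
 * Return: -EBUSY while a gadget connection is active, 0 otherwise.
 */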
static int dwc3_runtime_checks(struct dwc3 *dwc)
{
	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc->connected)
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_runtime_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	if (dwc3_runtime_checks(dwc))
		return -EBUSY;

	ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
	if (ret)
		return ret;

	return 0;
}

static int dwc3_runtime_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
	if (ret)
		return ret;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc->pending_events) {
			pm_runtime_put(dwc->dev);
			dwc->pending_events = false;
			enable_irq(dwc->irq_gadget);
		}
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_mark_last_busy(dev);

	return 0;
}

static int dwc3_runtime_idle(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc3_runtime_checks(dwc))
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int dwc3_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
	if (ret)
		return ret;

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dwc3_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret = 0;

	pinctrl_pm_select_default_state(dev);

	pm_runtime_disable(dev);
	ret = pm_runtime_set_active(dev);
	if (ret)
		goto out;

	ret = dwc3_resume_common(dwc, PMSG_RESUME);
	if (ret)
		pm_runtime_set_suspended(dev);

out:
	pm_runtime_enable(dev);

	return ret;
}

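/*
 * At the end of system resume, re-apply GUCTL3.SPLITDISABLE for host-mode
 * controllers that use the "snps,dis-split-quirk" property.
 */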
static void dwc3_complete(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	u32 reg;

	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
	    dwc->dis_split_quirk) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
		reg |= DWC3_GUCTL3_SPLITDISABLE;
		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
	}
}
#else
#define dwc3_complete NULL
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dwc3_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
	.complete = dwc3_complete,

	/*
	 * Runtime suspend halts the controller on disconnection. It relies on
	 * platforms with custom connection notification to start the
	 * controller again.
	 */
	SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
			   dwc3_runtime_idle)
};

#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
	{
		.compatible = "snps,dwc3"
	},
	{
		.compatible = "synopsys,dwc3"
	},
	{ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_match);
#endif

#ifdef CONFIG_ACPI

#define ACPI_ID_INTEL_BSW	"808622B7"

static const struct acpi_device_id dwc3_acpi_match[] = {
	{ ACPI_ID_INTEL_BSW, 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
#endif

static struct platform_driver dwc3_driver = {
	.probe		= dwc3_probe,
	.remove		= dwc3_remove,
	.driver		= {
		.name	= "dwc3",
		.of_match_table	= of_match_ptr(of_dwc3_match),
		.acpi_match_table = ACPI_PTR(dwc3_acpi_match),
		.pm	= &dwc3_dev_pm_ops,
	},
};

module_platform_driver(dwc3_driver);

MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");