// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - DesignWare USB3 DRD Controller Core file
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/clk.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/bitfield.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

#include "debug.h"

#define DWC3_DEFAULT_AUTOSUSPEND_DELAY	5000 /* ms */

/**
 * dwc3_get_dr_mode - Validates and sets dr_mode
 * @dwc: pointer to our context structure
 */
static int dwc3_get_dr_mode(struct dwc3 *dwc)
{
	enum usb_dr_mode mode;
	struct device *dev = dwc->dev;
	unsigned int hw_mode;

	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	mode = dwc->dr_mode;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	switch (hw_mode) {
	case DWC3_GHWPARAMS0_MODE_GADGET:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
			dev_err(dev,
				"Controller does not support host mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_PERIPHERAL;
		break;
	case DWC3_GHWPARAMS0_MODE_HOST:
		if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
			dev_err(dev,
				"Controller does not support device mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_HOST;
		break;
	default:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
			mode = USB_DR_MODE_HOST;
		else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
			mode = USB_DR_MODE_PERIPHERAL;

		/*
		 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
		 * mode. If the controller supports DRD but the dr_mode is not
		 * specified or set to OTG, then set the mode to peripheral.
		 */
		if (mode == USB_DR_MODE_OTG && !dwc->edev &&
		    (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
		     !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
		    !DWC3_VER_IS_PRIOR(DWC3, 330A))
			mode = USB_DR_MODE_PERIPHERAL;
	}

	if (mode != dwc->dr_mode) {
		dev_warn(dev,
			 "Configuration mismatch. dr_mode forced to %s\n",
			 mode == USB_DR_MODE_HOST ? "host" : "gadget");

		dwc->dr_mode = mode;
	}

	return 0;
}

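/**
 * dwc3_set_prtcap - program the port capability direction
 * @dwc: pointer to our context structure
 * @mode: one of the DWC3_GCTL_PRTCAP_{HOST,DEVICE,OTG} values
 *
 * Updates GCTL.PRTCAPDIR for the requested role and caches it in
 * dwc->current_dr_role.
 */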
"host" : "gadget"); 100 101 dwc->dr_mode = mode; 102 } 103 104 return 0; 105 } 106 107 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) 108 { 109 u32 reg; 110 111 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 112 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); 113 reg |= DWC3_GCTL_PRTCAPDIR(mode); 114 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 115 116 dwc->current_dr_role = mode; 117 } 118 119 static void __dwc3_set_mode(struct work_struct *work) 120 { 121 struct dwc3 *dwc = work_to_dwc(work); 122 unsigned long flags; 123 int ret; 124 u32 reg; 125 u32 desired_dr_role; 126 127 mutex_lock(&dwc->mutex); 128 spin_lock_irqsave(&dwc->lock, flags); 129 desired_dr_role = dwc->desired_dr_role; 130 spin_unlock_irqrestore(&dwc->lock, flags); 131 132 pm_runtime_get_sync(dwc->dev); 133 134 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) 135 dwc3_otg_update(dwc, 0); 136 137 if (!desired_dr_role) 138 goto out; 139 140 if (desired_dr_role == dwc->current_dr_role) 141 goto out; 142 143 if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) 144 goto out; 145 146 switch (dwc->current_dr_role) { 147 case DWC3_GCTL_PRTCAP_HOST: 148 dwc3_host_exit(dwc); 149 break; 150 case DWC3_GCTL_PRTCAP_DEVICE: 151 dwc3_gadget_exit(dwc); 152 dwc3_event_buffers_cleanup(dwc); 153 break; 154 case DWC3_GCTL_PRTCAP_OTG: 155 dwc3_otg_exit(dwc); 156 spin_lock_irqsave(&dwc->lock, flags); 157 dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE; 158 spin_unlock_irqrestore(&dwc->lock, flags); 159 dwc3_otg_update(dwc, 1); 160 break; 161 default: 162 break; 163 } 164 165 /* 166 * When current_dr_role is not set, there's no role switching. 167 * Only perform GCTL.CoreSoftReset when there's DRD role switching. 168 */ 169 if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) || 170 DWC3_VER_IS_PRIOR(DWC31, 190A)) && 171 desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { 172 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 173 reg |= DWC3_GCTL_CORESOFTRESET; 174 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 175 176 /* 177 * Wait for internal clocks to synchronized. DWC_usb31 and 178 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To 179 * keep it consistent across different IPs, let's wait up to 180 * 100ms before clearing GCTL.CORESOFTRESET. 
		 */
		msleep(100);

		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
		reg &= ~DWC3_GCTL_CORESOFTRESET;
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_set_prtcap(dwc, desired_dr_role);

	spin_unlock_irqrestore(&dwc->lock, flags);

	switch (desired_dr_role) {
	case DWC3_GCTL_PRTCAP_HOST:
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dwc->dev, "failed to initialize host\n");
		} else {
			if (dwc->usb2_phy)
				otg_set_vbus(dwc->usb2_phy->otg, true);
			phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
			phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
			if (dwc->dis_split_quirk) {
				reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
				reg |= DWC3_GUCTL3_SPLITDISABLE;
				dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
			}
		}
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_core_soft_reset(dwc);

		dwc3_event_buffers_setup(dwc);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			dev_err(dwc->dev, "failed to initialize peripheral\n");
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		dwc3_otg_init(dwc);
		dwc3_otg_update(dwc, 0);
		break;
	default:
		break;
	}

out:
	pm_runtime_mark_last_busy(dwc->dev);
	pm_runtime_put_autosuspend(dwc->dev);
	mutex_unlock(&dwc->mutex);
}

void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
	unsigned long flags;

	if (dwc->dr_mode != USB_DR_MODE_OTG)
		return;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->desired_dr_role = mode;
	spin_unlock_irqrestore(&dwc->lock, flags);

	queue_work(system_freezable_wq, &dwc->drd_work);
}

u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
			DWC3_GDBGFIFOSPACE_NUM(dep->number) |
			DWC3_GDBGFIFOSPACE_TYPE(type));

	reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);

	return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
}

/**
 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
 * @dwc: pointer to our context structure
 */
int dwc3_core_soft_reset(struct dwc3 *dwc)
{
	u32 reg;
	int retries = 1000;

	/*
	 * We're resetting only the device side because, if we're in host mode,
	 * the XHCI driver will reset the host block. If dwc3 was configured
	 * for host-only mode, then we can return early.
	 */
	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg |= DWC3_DCTL_CSFTRST;
	reg &= ~DWC3_DCTL_RUN_STOP;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	/*
	 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFTRST bit
	 * is cleared only after all the clocks are synchronized. This can
	 * take a little more than 50ms. Set the polling rate at 20ms
	 * for 10 times instead.
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
		retries = 10;

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		if (!(reg & DWC3_DCTL_CSFTRST))
			goto done;

		if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
			msleep(20);
		else
			udelay(1);
	} while (--retries);

	dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
	return -ETIMEDOUT;

done:
	/*
	 * For DWC_usb31 controller 1.80a and prior, once the DCTL.CSFTRST bit
	 * is cleared, we must wait at least 50ms before accessing the PHY
	 * domain (synchronization delay).
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
		msleep(50);

	return 0;
}

/*
 * dwc3_frame_length_adjustment - Adjusts frame length if required
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
	u32 reg;
	u32 dft;

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	if (dwc->fladj == 0)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	dft = reg & DWC3_GFLADJ_30MHZ_MASK;
	if (dft != dwc->fladj) {
		reg &= ~DWC3_GFLADJ_30MHZ_MASK;
		reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
		dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
	}
}

/**
 * dwc3_ref_clk_period - Reference clock period configuration
 *		Default reference clock period depends on hardware
 *		configuration. For systems with reference clock that differs
 *		from the default, this will set clock period in DWC3_GUCTL
 *		register.
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_ref_clk_period(struct dwc3 *dwc)
{
	unsigned long period;
	unsigned long fladj;
	unsigned long decr;
	unsigned long rate;
	u32 reg;

	if (dwc->ref_clk) {
		rate = clk_get_rate(dwc->ref_clk);
		if (!rate)
			return;
		period = NSEC_PER_SEC / rate;
	} else if (dwc->ref_clk_per) {
		period = dwc->ref_clk_per;
		rate = NSEC_PER_SEC / period;
	} else {
		return;
	}

	reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
	reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
	reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
	dwc3_writel(dwc->regs, DWC3_GUCTL, reg);

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	/*
	 * The calculation below is
	 *
	 *     125000 * (NSEC_PER_SEC / (rate * period) - 1)
	 *
	 * but rearranged for fixed-point arithmetic. The division must be
	 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
	 * neither does rate * period).
	 *
	 * Note that rate * period ~= NSEC_PER_SEC, minus the number of
	 * nanoseconds of error caused by the truncation which happened during
	 * the division when calculating rate or period (whichever one was
	 * derived from the other). We first calculate the relative error, then
	 * scale it to units of 8 ppm.
	 */
	fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
	fladj -= 125000;

	/*
	 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
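	 *
	 * For example (purely illustrative), with a 19.2 MHz reference clock
	 * the code above computes period = 52 ns and
	 * fladj = 125000 * NSEC_PER_SEC / (19200000 * 52) - 125000 = 200
	 * (the ~1600 ppm truncation error expressed in units of 8 ppm), while
	 * below decr = 480000000 / 19200000 = 25, i.e. 240MHZDECR = 12 and
	 * 240MHZDECR_PLS1 = 1.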
	 */
	decr = 480000000 / rate;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
	       & ~DWC3_GFLADJ_240MHZDECR
	       & ~DWC3_GFLADJ_240MHZDECR_PLS1;
	reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
	       | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
	       | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);

	if (dwc->gfladj_refclk_lpm_sel)
		reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;

	dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}

/**
 * dwc3_free_one_event_buffer - Frees one event buffer
 * @dwc: Pointer to our controller context structure
 * @evt: Pointer to event buffer to be freed
 */
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt)
{
	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
}

/**
 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
 * @dwc: Pointer to our controller context structure
 * @length: size of the event buffer
 *
 * Returns a pointer to the allocated event buffer structure on success,
 * otherwise ERR_PTR(errno).
 */
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
		unsigned int length)
{
	struct dwc3_event_buffer *evt;

	evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return ERR_PTR(-ENOMEM);

	evt->dwc = dwc;
	evt->length = length;
	evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
	if (!evt->cache)
		return ERR_PTR(-ENOMEM);

	evt->buf = dma_alloc_coherent(dwc->sysdev, length,
			&evt->dma, GFP_KERNEL);
	if (!evt->buf)
		return ERR_PTR(-ENOMEM);

	return evt;
}

/**
 * dwc3_free_event_buffers - frees all allocated event buffers
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_free_event_buffers(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;
	if (evt)
		dwc3_free_one_event_buffer(dwc, evt);
}

/**
 * dwc3_alloc_event_buffers - Allocates one event buffer of size @length
 * @dwc: pointer to our controller context structure
 * @length: size of event buffer
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
{
	struct dwc3_event_buffer *evt;

	evt = dwc3_alloc_one_event_buffer(dwc, length);
	if (IS_ERR(evt)) {
		dev_err(dwc->dev, "can't allocate event buffer\n");
		return PTR_ERR(evt);
	}
	dwc->ev_buf = evt;

	return 0;
}

/**
 * dwc3_event_buffers_setup - setup our allocated event buffers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;
	evt->lpos = 0;
	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
			lower_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
			upper_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);

	return 0;
}

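/**
 * dwc3_event_buffers_cleanup - disable the event buffer
 * @dwc: pointer to our controller context structure
 *
 * Clears the event buffer address and size registers and masks the event
 * interrupt so the controller stops writing events during teardown.
 */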
void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;

	evt->lpos = 0;

	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
			| DWC3_GEVNTSIZ_SIZE(0));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
}

static void dwc3_core_num_eps(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	dwc->num_eps = DWC3_NUM_EPS(parms);
}

static void dwc3_cache_hwparams(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
	parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
	parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
	parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
	parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
	parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
	parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
	parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);

	if (DWC3_IP_IS(DWC32))
		parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
}

static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
	int intf;
	int ret = 0;

	intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);

	if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
	    (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
	     dwc->hsphy_interface &&
	     !strncmp(dwc->hsphy_interface, "ulpi", 4)))
		ret = dwc3_ulpi_init(dwc);

	return ret;
}

/**
 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success. The USB PHY interfaces are configured but not
 * initialized. The PHY interfaces and the PHYs get initialized together with
 * the core in dwc3_core_init.
 */
static int dwc3_phy_setup(struct dwc3 *dwc)
{
	unsigned int hw_mode;
	u32 reg;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));

	/*
	 * Make sure UX_EXIT_PX is cleared as that causes issues with some
	 * PHYs. Also, this bit is not supposed to be used in normal operation.
	 */
	reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;

	/*
	 * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
	 * to '0' during coreConsultant configuration. So default value
	 * will be '0' when the core is reset. Application needs to set it
	 * to '1' after the core initialization is completed.
	 */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
		reg |= DWC3_GUSB3PIPECTL_SUSPHY;

	/*
	 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
	 * power-on reset, and it can be set after core initialization, which is
	 * after device soft-reset during initialization.
	 */
	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	if (dwc->u2ss_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;

	if (dwc->dis_rxdet_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;

	if (dwc->req_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;

	if (dwc->del_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;

	if (dwc->del_phy_power_chg_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;

	if (dwc->lfps_filter_quirk)
		reg |= DWC3_GUSB3PIPECTL_LFPSFILT;

	if (dwc->rx_detect_poll_quirk)
		reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;

	if (dwc->tx_de_emphasis_quirk)
		reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);

	if (dwc->dis_u3_susphy_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	if (dwc->dis_del_phy_power_chg_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));

	/* Select the HS PHY interface */
	switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
	case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
		if (dwc->hsphy_interface &&
		    !strncmp(dwc->hsphy_interface, "utmi", 4)) {
			reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
			break;
		} else if (dwc->hsphy_interface &&
			   !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
			reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
		} else {
			/* Relying on default value. */
			if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
				break;
		}
		fallthrough;
	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
	default:
		break;
	}

	switch (dwc->hsphy_mode) {
	case USBPHY_INTERFACE_MODE_UTMI:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
		break;
	case USBPHY_INTERFACE_MODE_UTMIW:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
		break;
	default:
		break;
	}

	/*
	 * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
	 * '0' during coreConsultant configuration. So default value will
	 * be '0' when the core is reset. Application needs to set it to
	 * '1' after the core initialization is completed.
	 */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
		reg |= DWC3_GUSB2PHYCFG_SUSPHY;

	/*
	 * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
	 * power-on reset, and it can be set after core initialization, which is
	 * after device soft-reset during initialization.
	 */
	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_u2_susphy_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_enblslpm_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
	else
		reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;

	if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
		reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;

	/*
	 * Some ULPI USB PHYs do not support an internal VBUS supply; driving
	 * the CPEN pin requires the configuration of the ULPI DRVVBUSEXTERNAL
	 * bit in the OTG_CTRL register. The controller configures the USB2 PHY
	 * ULPIEXTVBUSDRV bit[17] of the GUSB2PHYCFG register to drive VBUS
	 * with an external supply.
	 */
	if (dwc->ulpi_ext_vbus_drv)
		reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

	return 0;
}

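/**
 * dwc3_phy_init - initialize the USB2 and USB3 PHYs
 * @dwc: pointer to our controller context structure
 *
 * Initializes the legacy USB PHYs and the generic PHYs, unwinding the
 * already-initialized ones on failure. Returns 0 on success or a negative
 * errno.
 */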
static int dwc3_phy_init(struct dwc3 *dwc)
{
	int ret;

	usb_phy_init(dwc->usb2_phy);
	usb_phy_init(dwc->usb3_phy);

	ret = phy_init(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err_shutdown_usb3_phy;

	ret = phy_init(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err_exit_usb2_phy;

	return 0;

err_exit_usb2_phy:
	phy_exit(dwc->usb2_generic_phy);
err_shutdown_usb3_phy:
	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);

	return ret;
}

static void dwc3_phy_exit(struct dwc3 *dwc)
{
	phy_exit(dwc->usb3_generic_phy);
	phy_exit(dwc->usb2_generic_phy);

	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);
}

static int dwc3_phy_power_on(struct dwc3 *dwc)
{
	int ret;

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);

	ret = phy_power_on(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err_suspend_usb3_phy;

	ret = phy_power_on(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err_power_off_usb2_phy;

	return 0;

err_power_off_usb2_phy:
	phy_power_off(dwc->usb2_generic_phy);
err_suspend_usb3_phy:
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);

	return ret;
}

static void dwc3_phy_power_off(struct dwc3 *dwc)
{
	phy_power_off(dwc->usb3_generic_phy);
	phy_power_off(dwc->usb2_generic_phy);

	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);
}

static int dwc3_clk_enable(struct dwc3 *dwc)
{
	int ret;

	ret = clk_prepare_enable(dwc->bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dwc->ref_clk);
	if (ret)
		goto disable_bus_clk;

	ret = clk_prepare_enable(dwc->susp_clk);
	if (ret)
		goto disable_ref_clk;

	ret = clk_prepare_enable(dwc->utmi_clk);
	if (ret)
		goto disable_susp_clk;

	ret = clk_prepare_enable(dwc->pipe_clk);
	if (ret)
		goto disable_utmi_clk;

	return 0;

disable_utmi_clk:
	clk_disable_unprepare(dwc->utmi_clk);
disable_susp_clk:
	clk_disable_unprepare(dwc->susp_clk);
disable_ref_clk:
	clk_disable_unprepare(dwc->ref_clk);
disable_bus_clk:
	clk_disable_unprepare(dwc->bus_clk);
	return ret;
}

static void dwc3_clk_disable(struct dwc3 *dwc)
{
	clk_disable_unprepare(dwc->pipe_clk);
	clk_disable_unprepare(dwc->utmi_clk);
	clk_disable_unprepare(dwc->susp_clk);
	clk_disable_unprepare(dwc->ref_clk);
	clk_disable_unprepare(dwc->bus_clk);
}

static void dwc3_core_exit(struct dwc3 *dwc)
{
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_clk_disable(dwc);
	reset_control_assert(dwc->reset);
}

static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
	dwc->ip = DWC3_GSNPS_ID(reg);

	/* This should read as U3 followed by revision number */
	if (DWC3_IP_IS(DWC3)) {
		dwc->revision = reg;
	} else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
		dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
	} else {
		return false;
	}

	return true;
}

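/**
 * dwc3_core_setup_global_control - initialize GCTL from hw params and quirks
 * @dwc: pointer to our controller context structure
 *
 * Programs clock gating, hibernation and scrambling settings in GCTL based
 * on the power optimization mode advertised in GHWPARAMS1 and on the quirks
 * enabled for this platform.
 */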
static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;

	switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		/*
		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
		 * issue which would cause xHCI compliance tests to fail.
		 *
		 * Because of that we cannot enable clock gating on such
		 * configurations.
		 *
		 * Refers to:
		 *
		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
		 * SOF/ITP Mode Used
		 */
		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
				dwc->dr_mode == USB_DR_MODE_OTG) &&
				DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
		else
			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
		/*
		 * REVISIT Enabling this bit so that host-mode hibernation
		 * will work. Device-mode hibernation is not yet implemented.
		 */
		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
		break;
	default:
		/* nothing */
		break;
	}

	/* check if current dwc3 is on simulation board */
	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
		dev_info(dwc->dev, "Running with FPGA optimizations\n");
		dwc->is_fpga = true;
	}

	WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
			"disable_scramble cannot be used on non-FPGA builds\n");

	if (dwc->disable_scramble_quirk && dwc->is_fpga)
		reg |= DWC3_GCTL_DISSCRAMBLE;
	else
		reg &= ~DWC3_GCTL_DISSCRAMBLE;

	if (dwc->u2exit_lfps_quirk)
		reg |= DWC3_GCTL_U2EXIT_LFPS;

	/*
	 * WORKAROUND: DWC3 revisions < 1.90a have a bug where the device
	 * can fail to connect at SuperSpeed and falls back to high-speed
	 * mode, which causes the device to enter a Connect/Disconnect loop.
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 190A))
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);

/* set global incr burst type configuration registers */
static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	/* incrx_mode : for INCR burst type. */
	bool incrx_mode;
	/* incrx_size : for size of INCRX burst. */
	u32 incrx_size;
	u32 *vals;
	u32 cfg;
	int ntype;
	int ret;
	int i;

	cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

	/*
	 * Handle the property "snps,incr-burst-type-adjustment".
	 * Get the number of values from this property:
	 * result <= 0 means this property is not supported;
	 * result = 1 means INCRx burst mode is supported;
	 * result > 1 means undefined length burst mode is supported.
	 */
	ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
	if (ntype <= 0)
		return;

	vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
	if (!vals)
		return;

	/* Get INCR burst type, and parse it */
	ret = device_property_read_u32_array(dev,
			"snps,incr-burst-type-adjustment", vals, ntype);
	if (ret) {
		kfree(vals);
		dev_err(dev, "failed to get property\n");
		return;
	}

	incrx_size = *vals;

	if (ntype > 1) {
		/* INCRX (undefined length) burst mode */
		incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
		for (i = 1; i < ntype; i++) {
			if (vals[i] > incrx_size)
				incrx_size = vals[i];
		}
	} else {
		/* INCRX burst mode */
		incrx_mode = INCRX_BURST_MODE;
	}

	kfree(vals);

	/* Enable Undefined Length INCR Burst and Enable INCRx Burst */
	cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
	if (incrx_mode)
		cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
	switch (incrx_size) {
	case 256:
		cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
		break;
	case 128:
		cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
		break;
	case 64:
		cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
		break;
	case 32:
		cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
		break;
	case 16:
		cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
		break;
	case 8:
		cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
		break;
	case 4:
		cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
		break;
	case 1:
		break;
	default:
		dev_err(dev, "Invalid property\n");
		break;
	}

	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
}

static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
{
	u32 scale;
	u32 reg;

	if (!dwc->susp_clk)
		return;

	/*
	 * The power down scale field specifies how many suspend_clk
	 * periods fit into a 16 kHz clock period. When performing
	 * the division, round up the remainder.
	 *
	 * The power down scale value is calculated using the fastest
	 * frequency of the suspend_clk. If it isn't fixed (but within
	 * the accuracy requirement), the driver may not know the max
	 * rate of the suspend_clk, so only update the power down scale
	 * if the default is less than the calculated value from
	 * clk_get_rate() or if the default is questionably high
	 * (3x or more) to be within the requirement.
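	 *
	 * For example (purely illustrative): a 32.768 kHz suspend clock gives
	 * scale = DIV_ROUND_UP(32768, 16000) = 3, while a 100 MHz suspend
	 * clock gives scale = 6250.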
	 */
	scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
	    (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
		reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
		reg |= DWC3_GCTL_PWRDNSCALE(scale);
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}
}

static void dwc3_config_threshold(struct dwc3 *dwc)
{
	u32 reg;
	u8 rx_thr_num;
	u8 rx_maxburst;
	u8 tx_thr_num;
	u8 tx_maxburst;

	/*
	 * Must configure both the number of packets and the max burst
	 * settings to enable RX and/or TX threshold.
	 */
	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
		rx_thr_num = dwc->rx_thr_num_pkt_prd;
		rx_maxburst = dwc->rx_max_burst_prd;
		tx_thr_num = dwc->tx_thr_num_pkt_prd;
		tx_maxburst = dwc->tx_max_burst_prd;

		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_RXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);

			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_TXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);

			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}

	rx_thr_num = dwc->rx_thr_num_pkt;
	rx_maxburst = dwc->rx_max_burst;
	tx_thr_num = dwc->tx_thr_num_pkt;
	tx_maxburst = dwc->tx_max_burst;

	if (DWC3_IP_IS(DWC3)) {
		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC3_GRXTHRCFG_PKTCNTSEL;

			reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
			reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);

			reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
			reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC3_GTXTHRCFG_PKTCNTSEL;

			reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
			reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);

			reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
			reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	} else {
		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_GRXTHRCFG_PKTCNTSEL;

			reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
			reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);

			reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
			reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_GTXTHRCFG_PKTCNTSEL;

			reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
			reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);

			reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
			reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}
}

/**
 * dwc3_core_init - Low-level initialization of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_core_init(struct dwc3 *dwc)
{
	unsigned int hw_mode;
	u32 reg;
	int ret;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	/*
	 * Write Linux Version Code to our GUID register so it's easy to figure
	 * out in which kernel version a bug was found.
	 */
	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);

	ret = dwc3_phy_setup(dwc);
	if (ret)
		return ret;

	if (!dwc->ulpi_ready) {
		ret = dwc3_core_ulpi_init(dwc);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				dwc3_core_soft_reset(dwc);
				ret = -EPROBE_DEFER;
			}
			return ret;
		}
		dwc->ulpi_ready = true;
	}

	if (!dwc->phys_ready) {
		ret = dwc3_core_get_phy(dwc);
		if (ret)
			goto err_exit_ulpi;
		dwc->phys_ready = true;
	}

	ret = dwc3_phy_init(dwc);
	if (ret)
		goto err_exit_ulpi;

	ret = dwc3_core_soft_reset(dwc);
	if (ret)
		goto err_exit_phy;

	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
	    !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
		if (!dwc->dis_u3_susphy_quirk) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
			reg |= DWC3_GUSB3PIPECTL_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
		}

		if (!dwc->dis_u2_susphy_quirk) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
			reg |= DWC3_GUSB2PHYCFG_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
		}
	}

	dwc3_core_setup_global_control(dwc);
	dwc3_core_num_eps(dwc);

	/* Set power down scale of suspend_clk */
	dwc3_set_power_down_clk_scale(dwc);

	/* Adjust Frame Length */
	dwc3_frame_length_adjustment(dwc);

	/* Adjust Reference Clock Period */
	dwc3_ref_clk_period(dwc);

	dwc3_set_incr_burst_type(dwc);

	ret = dwc3_phy_power_on(dwc);
	if (ret)
		goto err_exit_phy;

	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err_power_off_phy;
	}

	/*
	 * ENDXFER polling is available on version 3.10a and later of
	 * the DWC_usb3 controller. It is NOT available in the
	 * DWC_usb31 controller.
	 */
	if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
		reg |= DWC3_GUCTL2_RST_ACTBITLATER;
		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
	}

	/*
	 * When configured in HOST mode, after issuing U3/L2 exit the
	 * controller fails to send a proper CRC checksum in the CRC5 field.
	 * Because of this behaviour a Transaction Error is generated,
	 * resulting in reset and re-enumeration of the attached USB device.
	 * All of termsel, xcvrsel and opmode become 0 at the end of resume.
	 * Enabling bit 10 of GUCTL1 corrects this problem. This option is
	 * intended to support certain legacy ULPI PHYs.
	 */
	if (dwc->resume_hs_terminations) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
		reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}

	if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);

		/*
		 * Enable hardware control of sending remote wakeup
		 * in HS when the device is in the L1 state.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
			reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;

		/*
		 * Decouple USB 2.0 L1 and L2 events, which allows the
		 * gadget driver to only receive U3/L2 suspend and wakeup
		 * events and prevents the more frequent L1 LPM transitions
		 * from interrupting the driver.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
			reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;

		if (dwc->dis_tx_ipgap_linecheck_quirk)
			reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;

		if (dwc->parkmode_disable_ss_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;

		if (dwc->parkmode_disable_hs_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;

		if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY) &&
		    (dwc->maximum_speed == USB_SPEED_HIGH ||
		     dwc->maximum_speed == USB_SPEED_FULL))
			reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;

		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}

	dwc3_config_threshold(dwc);

	/*
	 * Modify this for all supported Super Speed ports when
	 * multiport support is added.
	 */
	if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
	    (DWC3_IP_IS(DWC31)) &&
	    dwc->maximum_speed == USB_SPEED_SUPER) {
		reg = dwc3_readl(dwc->regs, DWC3_LLUCTL);
		reg |= DWC3_LLUCTL_FORCE_GEN1;
		dwc3_writel(dwc->regs, DWC3_LLUCTL, reg);
	}

	return 0;

err_power_off_phy:
	dwc3_phy_power_off(dwc);
err_exit_phy:
	dwc3_phy_exit(dwc);
err_exit_ulpi:
	dwc3_ulpi_exit(dwc);

	return ret;
}

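/**
 * dwc3_core_get_phy - look up the USB2/USB3 PHYs
 * @dwc: pointer to our controller context structure
 *
 * Grabs both the legacy USB PHYs and the generic PHYs. A missing PHY is
 * treated as optional; any other lookup error is propagated to the caller.
 */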
static int dwc3_core_get_phy(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *node = dev->of_node;
	int ret;

	if (node) {
		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
	} else {
		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
	}

	if (IS_ERR(dwc->usb2_phy)) {
		ret = PTR_ERR(dwc->usb2_phy);
		if (ret == -ENXIO || ret == -ENODEV)
			dwc->usb2_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
	}

	if (IS_ERR(dwc->usb3_phy)) {
		ret = PTR_ERR(dwc->usb3_phy);
		if (ret == -ENXIO || ret == -ENODEV)
			dwc->usb3_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
	}

	dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(dwc->usb2_generic_phy)) {
		ret = PTR_ERR(dwc->usb2_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV)
			dwc->usb2_generic_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
	}

	dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
	if (IS_ERR(dwc->usb3_generic_phy)) {
		ret = PTR_ERR(dwc->usb3_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV)
			dwc->usb3_generic_phy = NULL;
		else
			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
	}

	return 0;
}

static int dwc3_core_init_mode(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret;

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize gadget\n");
		break;
	case USB_DR_MODE_HOST:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, true);
		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);

		ret = dwc3_host_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize host\n");
		break;
	case USB_DR_MODE_OTG:
		INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
		ret = dwc3_drd_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
		break;
	default:
		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
		return -EINVAL;
	}

	return 0;
}

static void dwc3_core_exit_mode(struct dwc3 *dwc)
{
	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_gadget_exit(dwc);
		break;
	case USB_DR_MODE_HOST:
		dwc3_host_exit(dwc);
		break;
	case USB_DR_MODE_OTG:
		dwc3_drd_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	/* de-assert DRVVBUS for HOST and OTG mode */
	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
}

static void dwc3_get_properties(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	u8 lpm_nyet_threshold;
	u8 tx_de_emphasis;
	u8 hird_threshold;
	u8 rx_thr_num_pkt = 0;
	u8 rx_max_burst = 0;
	u8 tx_thr_num_pkt = 0;
	u8 tx_max_burst = 0;
	u8 rx_thr_num_pkt_prd = 0;
	u8 rx_max_burst_prd = 0;
	u8 tx_thr_num_pkt_prd = 0;
	u8 tx_max_burst_prd = 0;
	u8 tx_fifo_resize_max_num;
	const char *usb_psy_name;
	int ret;

	/* default to highest possible threshold */
	lpm_nyet_threshold = 0xf;

	/* default to -3.5dB de-emphasis */
	tx_de_emphasis = 1;

	/*
	 * default to assert utmi_sleep_n and use maximum allowed HIRD
	 * threshold value of 0b1100
	 */
	hird_threshold = 12;

	/*
	 * default to a TXFIFO size large enough to fit 6 max packets. This
	 * allows for systems with larger bus latencies to have some headroom
	 * for endpoints that have a large bMaxBurst value.
	 */
	tx_fifo_resize_max_num = 6;

	dwc->maximum_speed = usb_get_maximum_speed(dev);
	dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
	dwc->dr_mode = usb_get_dr_mode(dev);
	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);

	dwc->sysdev_is_parent = device_property_read_bool(dev,
				"linux,sysdev_is_parent");
	if (dwc->sysdev_is_parent)
		dwc->sysdev = dwc->dev->parent;
	else
		dwc->sysdev = dwc->dev;

	dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);

	ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
	if (ret >= 0) {
		dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
		if (!dwc->usb_psy)
			dev_err(dev, "couldn't get usb power supply\n");
	}

	dwc->has_lpm_erratum = device_property_read_bool(dev,
				"snps,has-lpm-erratum");
	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
				&lpm_nyet_threshold);
	dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
				"snps,is-utmi-l1-suspend");
	device_property_read_u8(dev, "snps,hird-threshold",
				&hird_threshold);
	dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
				"snps,dis-start-transfer-quirk");
	dwc->usb3_lpm_capable = device_property_read_bool(dev,
				"snps,usb3_lpm_capable");
	dwc->usb2_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-lpm-disable");
	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-gadget-lpm-disable");
	device_property_read_u8(dev, "snps,rx-thr-num-pkt",
				&rx_thr_num_pkt);
	device_property_read_u8(dev, "snps,rx-max-burst",
				&rx_max_burst);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt",
				&tx_thr_num_pkt);
	device_property_read_u8(dev, "snps,tx-max-burst",
				&tx_max_burst);
	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
				&rx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,rx-max-burst-prd",
				&rx_max_burst_prd);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
				&tx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,tx-max-burst-prd",
				&tx_max_burst_prd);
	dwc->do_fifo_resize = device_property_read_bool(dev,
				"tx-fifo-resize");
	if (dwc->do_fifo_resize)
		device_property_read_u8(dev, "tx-fifo-max-num",
					&tx_fifo_resize_max_num);

	dwc->disable_scramble_quirk = device_property_read_bool(dev,
				"snps,disable_scramble_quirk");
	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
				"snps,u2exit_lfps_quirk");
	dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
				"snps,u2ss_inp3_quirk");
	dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,req_p1p2p3_quirk");
	dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,del_p1p2p3_quirk");
	dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,del_phy_power_chg_quirk");
	dwc->lfps_filter_quirk = device_property_read_bool(dev,
				"snps,lfps_filter_quirk");
	dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
				"snps,rx_detect_poll_quirk");
	dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u3_susphy_quirk");
	dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u2_susphy_quirk");
	dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
				"snps,dis_enblslpm_quirk");
	dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u1-entry-quirk");
	dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u2-entry-quirk");
	dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
				"snps,dis_rxdet_inp3_quirk");
	dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
				"snps,dis-u2-freeclk-exists-quirk");
	dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,dis-del-phy-power-chg-quirk");
	dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
				"snps,dis-tx-ipgap-linecheck-quirk");
	dwc->resume_hs_terminations = device_property_read_bool(dev,
				"snps,resume-hs-terminations");
	dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev,
				"snps,ulpi-ext-vbus-drv");
	dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
				"snps,parkmode-disable-ss-quirk");
	dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev,
				"snps,parkmode-disable-hs-quirk");
	dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
				"snps,gfladj-refclk-lpm-sel-quirk");

	dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
				"snps,tx_de_emphasis_quirk");
	device_property_read_u8(dev, "snps,tx_de_emphasis",
				&tx_de_emphasis);
	device_property_read_string(dev, "snps,hsphy_interface",
				    &dwc->hsphy_interface);
	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
				 &dwc->fladj);
	device_property_read_u32(dev, "snps,ref-clock-period-ns",
				 &dwc->ref_clk_per);

	dwc->dis_metastability_quirk = device_property_read_bool(dev,
				"snps,dis_metastability_quirk");

	dwc->dis_split_quirk = device_property_read_bool(dev,
				"snps,dis-split-quirk");

	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
	dwc->tx_de_emphasis = tx_de_emphasis;

	dwc->hird_threshold = hird_threshold;

	dwc->rx_thr_num_pkt = rx_thr_num_pkt;
	dwc->rx_max_burst = rx_max_burst;

	dwc->tx_thr_num_pkt = tx_thr_num_pkt;
	dwc->tx_max_burst = tx_max_burst;

	dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
	dwc->rx_max_burst_prd = rx_max_burst_prd;

	dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
	dwc->tx_max_burst_prd = tx_max_burst_prd;

	dwc->imod_interval = 0;

	dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
}

/* check whether the core supports IMOD */
bool dwc3_has_imod(struct dwc3 *dwc)
{
	return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
		DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
		DWC3_IP_IS(DWC32);
}

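/**
 * dwc3_check_params - validate parameters against the hardware
 * @dwc: pointer to our controller context structure
 *
 * Drops imod_interval when the core has no IMOD support, and falls back to
 * speeds and SSP rates the hardware reports as supported, warning when a
 * requested value cannot be honored.
 */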
static void dwc3_check_params(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	unsigned int hwparam_gen =
		DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);

	/* Check for proper value of imod_interval */
	if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
		dev_warn(dwc->dev, "Interrupt moderation not supported\n");
		dwc->imod_interval = 0;
	}

	/*
	 * Workaround for STAR 9000961433 which affects only version
	 * 3.00a of the DWC_usb3 core. This prevents the controller
	 * interrupt from being masked while handling events. IMOD
	 * allows us to work around this issue. Enable it for the
	 * affected version.
	 */
	if (!dwc->imod_interval &&
	    DWC3_VER_IS(DWC3, 300A))
		dwc->imod_interval = 1;

	/* Check the maximum_speed parameter */
	switch (dwc->maximum_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_SUPER:
		if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
			dev_warn(dev, "UDC doesn't support Gen 1\n");
		break;
	case USB_SPEED_SUPER_PLUS:
		if ((DWC3_IP_IS(DWC32) &&
		     hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
		    (!DWC3_IP_IS(DWC32) &&
		     hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
			dev_warn(dev, "UDC doesn't support SSP\n");
		break;
	default:
		dev_err(dev, "invalid maximum_speed parameter %d\n",
			dwc->maximum_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		switch (hwparam_gen) {
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
			if (DWC3_IP_IS(DWC32))
				dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			else
				dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
			dwc->maximum_speed = USB_SPEED_HIGH;
			break;
		default:
			dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		}
		break;
	}

	/*
	 * Currently the controller does not have visibility into the HW
	 * parameter to determine the maximum number of lanes the HW supports.
	 * If the number of lanes is not specified in the device property, then
	 * set the default to support dual-lane for DWC_usb32 and single-lane
	 * for DWC_usb31 for super-speed-plus.
	 */
	if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
		switch (dwc->max_ssp_rate) {
		case USB_SSP_GEN_2x1:
			if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
				dev_warn(dev, "UDC only supports Gen 1\n");
			break;
		case USB_SSP_GEN_1x2:
		case USB_SSP_GEN_2x2:
			if (DWC3_IP_IS(DWC31))
				dev_warn(dev, "UDC only supports single lane\n");
			break;
		case USB_SSP_GEN_UNKNOWN:
		default:
			switch (hwparam_gen) {
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_2x2;
				else
					dwc->max_ssp_rate = USB_SSP_GEN_2x1;
				break;
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_1x2;
				break;
			}
			break;
		}
	}
}

static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *np_phy;
	struct extcon_dev *edev = NULL;
	const char *name;

	if (device_property_read_bool(dev, "extcon"))
		return extcon_get_edev_by_phandle(dev, 0);

	/*
	 * Device tree platforms should get extcon via phandle.
	 * On ACPI platforms, we get the name from a device property.
	 * This device property is for kernel internal use only and
	 * is expected to be set by the glue code.
	 */
	if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
		return extcon_get_extcon_dev(name);

	/*
	 * Check explicitly if "usb-role-switch" is used since
	 * extcon_find_edev_by_node() cannot be used to check the absence of
	 * an extcon device. In the absence of a device it will always return
	 * EPROBE_DEFER.
	 */
	if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
	    device_property_read_bool(dev, "usb-role-switch"))
		return NULL;

	/*
	 * Try to get an extcon device from the USB PHY controller's "port"
	 * node. Check if it has the "port" node first, to avoid printing the
	 * error message from underlying code, as it's a valid case: extcon
	 * device (and "port" node) may be missing in case of "usb-role-switch"
	 * or OTG mode.
	 */
	np_phy = of_parse_phandle(dev->of_node, "phys", 0);
	if (of_graph_is_present(np_phy)) {
		struct device_node *np_conn;

		np_conn = of_graph_get_remote_node(np_phy, -1, -1);
		if (np_conn)
			edev = extcon_find_edev_by_node(np_conn);
		of_node_put(np_conn);
	}
	of_node_put(np_phy);

	return edev;
}

static int dwc3_get_clocks(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;

	if (!dev->of_node)
		return 0;

	/*
	 * Clocks are optional, but new DT platforms should support all clocks
	 * as required by the DT-binding.
	 * Some devices have different clock names in legacy device trees;
	 * check for them to retain backwards compatibility.
	 */
	dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
	if (IS_ERR(dwc->bus_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
				     "could not get bus clock\n");
	}

	if (dwc->bus_clk == NULL) {
		dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
		if (IS_ERR(dwc->bus_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
					     "could not get bus clock\n");
		}
	}

	dwc->ref_clk = devm_clk_get_optional(dev, "ref");
	if (IS_ERR(dwc->ref_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
				     "could not get ref clock\n");
	}

	if (dwc->ref_clk == NULL) {
		dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
		if (IS_ERR(dwc->ref_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
					     "could not get ref clock\n");
		}
	}

	dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
	if (IS_ERR(dwc->susp_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
				     "could not get suspend clock\n");
	}

	if (dwc->susp_clk == NULL) {
		dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
		if (IS_ERR(dwc->susp_clk)) {
			return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
					     "could not get suspend clock\n");
		}
	}

	/* specific to Rockchip RK3588 */
	dwc->utmi_clk = devm_clk_get_optional(dev, "utmi");
	if (IS_ERR(dwc->utmi_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->utmi_clk),
				     "could not get utmi clock\n");
	}

	/* specific to Rockchip RK3588 */
	dwc->pipe_clk = devm_clk_get_optional(dev, "pipe");
	if (IS_ERR(dwc->pipe_clk)) {
		return dev_err_probe(dev, PTR_ERR(dwc->pipe_clk),
				     "could not get pipe clock\n");
	}

	return 0;
}

static int dwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res, dwc_res;
	void __iomem *regs;
	struct dwc3 *dwc;
	int ret;

	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	dwc_res = *res;
	dwc_res.start += DWC3_GLOBALS_REGS_START;

	if (dev->of_node) {
		struct device_node *parent = of_get_parent(dev->of_node);

		if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
			dwc_res.start -= DWC3_GLOBALS_REGS_START;
			dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
		}

		of_node_put(parent);
	}

	regs = devm_ioremap_resource(dev, &dwc_res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dwc->regs = regs;
	dwc->regs_size = resource_size(&dwc_res);

	dwc3_get_properties(dwc);

	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
	if (IS_ERR(dwc->reset)) {
		ret = PTR_ERR(dwc->reset);
		goto err_put_psy;
	}

	ret = dwc3_get_clocks(dwc);
	if (ret)
		goto err_put_psy;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		goto err_put_psy;

	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto err_assert_reset;

	if (!dwc3_core_is_valid(dwc)) {
		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
		ret = -ENODEV;
		goto err_disable_clks;
	}

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	if (!dwc->sysdev_is_parent &&
	    DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
		if (ret)
			goto err_disable_clks;
	}

	spin_lock_init(&dwc->lock);
	mutex_init(&dwc->mutex);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);

	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err_allow_rpm;
	}

	dwc->edev = dwc3_get_extcon(dwc);
	if (IS_ERR(dwc->edev)) {
		ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
		goto err_free_event_buffers;
	}

	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err_free_event_buffers;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err_probe(dev, ret, "failed to initialize core\n");
		goto err_free_event_buffers;
	}

	dwc3_check_params(dwc);
	dwc3_debugfs_init(dwc);

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err_exit_debugfs;

	pm_runtime_put(dev);

	dma_set_max_seg_size(dev, UINT_MAX);

	return 0;

err_exit_debugfs:
	dwc3_debugfs_exit(dwc);
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_ulpi_exit(dwc);
err_free_event_buffers:
	dwc3_free_event_buffers(dwc);
err_allow_rpm:
	pm_runtime_allow(dev);
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_put_noidle(dev);
err_disable_clks:
	dwc3_clk_disable(dwc);
err_assert_reset:
	reset_control_assert(dwc->reset);
err_put_psy:
	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);

	return ret;
}

	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);

	return ret;
}

static void dwc3_remove(struct platform_device *pdev)
{
	struct dwc3 *dwc = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);

	dwc3_core_exit_mode(dwc);
	dwc3_debugfs_exit(dwc);

	dwc3_core_exit(dwc);
	dwc3_ulpi_exit(dwc);

	pm_runtime_allow(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	/*
	 * HACK: Clear the driver data, which is currently accessed by parent
	 * glue drivers, before allowing the parent to suspend.
	 */
	platform_set_drvdata(pdev, NULL);
	pm_runtime_set_suspended(&pdev->dev);

	dwc3_free_event_buffers(dwc);

	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);
}

#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
	int ret;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		return ret;

	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto assert_reset;

	ret = dwc3_core_init(dwc);
	if (ret)
		goto disable_clks;

	return 0;

disable_clks:
	dwc3_clk_disable(dwc);
assert_reset:
	reset_control_assert(dwc->reset);

	return ret;
}

static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
	unsigned long flags;
	u32 reg;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (pm_runtime_suspended(dwc->dev))
			break;
		dwc3_gadget_suspend(dwc);
		synchronize_irq(dwc->irq_gadget);
		dwc3_core_exit(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
			dwc3_core_exit(dwc);
			break;
		}

		/* Let the controller suspend the HSPHY before the PHY driver suspends */
		if (dwc->dis_u2_susphy_quirk ||
		    dwc->dis_enblslpm_quirk) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
			reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
				DWC3_GUSB2PHYCFG_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

			/* Give some time for the USB2 PHY to suspend */
			usleep_range(5000, 6000);
		}

		phy_pm_runtime_put_sync(dwc->usb2_generic_phy);
		phy_pm_runtime_put_sync(dwc->usb3_generic_phy);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* do nothing during runtime_suspend */
		if (PMSG_IS_AUTO(msg))
			break;

		if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			spin_lock_irqsave(&dwc->lock, flags);
			dwc3_gadget_suspend(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			synchronize_irq(dwc->irq_gadget);
		}

		dwc3_otg_exit(dwc);
		dwc3_core_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
	unsigned long flags;
	int ret;
	u32 reg;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;

		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		dwc3_gadget_resume(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
			ret = dwc3_core_init_for_resume(dwc);
			if (ret)
				return ret;
			dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
			break;
		}

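		/*
		 * Runtime resume, or a wakeup-capable host: the core was kept
		 * initialized across suspend, so only the PHY-related settings
		 * changed in dwc3_suspend_common() need to be undone below.
		 */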
		/* Restore GUSB2PHYCFG bits that were modified in suspend */
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (dwc->dis_u2_susphy_quirk)
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

		if (dwc->dis_enblslpm_quirk)
			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;

		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

		phy_pm_runtime_get_sync(dwc->usb2_generic_phy);
		phy_pm_runtime_get_sync(dwc->usb3_generic_phy);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* nothing to do on runtime_resume */
		if (PMSG_IS_AUTO(msg))
			break;

		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;

		dwc3_set_prtcap(dwc, dwc->current_dr_role);

		dwc3_otg_init(dwc);
		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
			dwc3_otg_host_init(dwc);
		} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			spin_lock_irqsave(&dwc->lock, flags);
			dwc3_gadget_resume(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
		}

		break;
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_runtime_checks(struct dwc3 *dwc)
{
	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc->connected)
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_runtime_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	if (dwc3_runtime_checks(dwc))
		return -EBUSY;

	ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
	if (ret)
		return ret;

	return 0;
}

static int dwc3_runtime_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
	if (ret)
		return ret;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_gadget_process_pending_events(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_mark_last_busy(dev);

	return 0;
}

static int dwc3_runtime_idle(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc3_runtime_checks(dwc))
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int dwc3_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
	if (ret)
		return ret;

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dwc3_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);

	ret = dwc3_resume_common(dwc, PMSG_RESUME);
	if (ret) {
		pm_runtime_set_suspended(dev);
		return ret;
	}

	pm_runtime_enable(dev);

	return 0;
}

static void dwc3_complete(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	u32 reg;

	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
	    dwc->dis_split_quirk) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
		reg |= DWC3_GUCTL3_SPLITDISABLE;
		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
	}
}
#else
#define dwc3_complete NULL
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dwc3_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
	.complete = dwc3_complete,
	SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
			dwc3_runtime_idle)
};

#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
	{
		.compatible = "snps,dwc3"
	},
	{
		.compatible = "synopsys,dwc3"
	},
	{ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_match);
#endif

#ifdef CONFIG_ACPI

#define ACPI_ID_INTEL_BSW	"808622B7"

static const struct acpi_device_id dwc3_acpi_match[] = {
	{ ACPI_ID_INTEL_BSW, 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
#endif

static struct platform_driver dwc3_driver = {
	.probe		= dwc3_probe,
	.remove_new	= dwc3_remove,
	.driver		= {
		.name	= "dwc3",
		.of_match_table	= of_match_ptr(of_dwc3_match),
		.acpi_match_table = ACPI_PTR(dwc3_acpi_match),
		.pm	= &dwc3_dev_pm_ops,
	},
};

module_platform_driver(dwc3_driver);

MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
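/*
 * Illustrative only, not part of the driver: a minimal sketch of a device-tree
 * node for this core, showing the clock names that dwc3_get_clocks() looks up
 * ("bus_early", "ref", "suspend"; the legacy "bus_clk", "ref_clk" and
 * "suspend_clk" names are also accepted). The unit address, interrupt and
 * clock phandles below are made up for the example.
 *
 *	usb@fe800000 {
 *		compatible = "snps,dwc3";
 *		reg = <0xfe800000 0x100000>;
 *		interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&cru ACLK_USB3>, <&cru CLK_REF_USB3>,
 *			 <&cru CLK_SUSPEND_USB3>;
 *		clock-names = "bus_early", "ref", "suspend";
 *		dr_mode = "otg";
 *	};
 */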