1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * core.c - DesignWare USB3 DRD Controller Core file
4 *
5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
6 *
7 * Authors: Felipe Balbi <balbi@ti.com>,
8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9 */
10
11 #include <linux/clk.h>
12 #include <linux/version.h>
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/interrupt.h>
20 #include <linux/ioport.h>
21 #include <linux/io.h>
22 #include <linux/list.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/of.h>
26 #include <linux/of_graph.h>
27 #include <linux/acpi.h>
28 #include <linux/pinctrl/consumer.h>
29 #include <linux/reset.h>
30 #include <linux/bitfield.h>
31
32 #include <linux/usb/ch9.h>
33 #include <linux/usb/gadget.h>
34 #include <linux/usb/of.h>
35 #include <linux/usb/otg.h>
36
37 #include "core.h"
38 #include "gadget.h"
39 #include "io.h"
40
41 #include "debug.h"
42 #include "../host/xhci-ext-caps.h"
43
44 #define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
45
46 /**
47 * dwc3_get_dr_mode - Validates and sets dr_mode
48 * @dwc: pointer to our context structure
49 */
50 static int dwc3_get_dr_mode(struct dwc3 *dwc)
51 {
52 enum usb_dr_mode mode;
53 struct device *dev = dwc->dev;
54 unsigned int hw_mode;
55
56 if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
57 dwc->dr_mode = USB_DR_MODE_OTG;
58
59 mode = dwc->dr_mode;
60 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
61
62 switch (hw_mode) {
63 case DWC3_GHWPARAMS0_MODE_GADGET:
64 if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
65 dev_err(dev,
66 "Controller does not support host mode.\n");
67 return -EINVAL;
68 }
69 mode = USB_DR_MODE_PERIPHERAL;
70 break;
71 case DWC3_GHWPARAMS0_MODE_HOST:
72 if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
73 dev_err(dev,
74 "Controller does not support device mode.\n");
75 return -EINVAL;
76 }
77 mode = USB_DR_MODE_HOST;
78 break;
79 default:
80 if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
81 mode = USB_DR_MODE_HOST;
82 else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
83 mode = USB_DR_MODE_PERIPHERAL;
84
85 /*
86 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
87 * mode. If the controller supports DRD but the dr_mode is not
88 * specified or set to OTG, then set the mode to peripheral.
89 */
90 if (mode == USB_DR_MODE_OTG && !dwc->edev &&
91 (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
92 !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
93 !DWC3_VER_IS_PRIOR(DWC3, 330A))
94 mode = USB_DR_MODE_PERIPHERAL;
95 }
96
97 if (mode != dwc->dr_mode) {
98 dev_warn(dev,
99 "Configuration mismatch. dr_mode forced to %s\n",
100 mode == USB_DR_MODE_HOST ? "host" : "gadget");
101
102 dwc->dr_mode = mode;
103 }
104
105 return 0;
106 }
107
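/**
 * dwc3_enable_susphy - Enable/disable PHY suspend on all ports
 * @dwc: pointer to our context structure
 * @enable: enable or disable the SUSPHY feature
 *
 * Sets or clears GUSB3PIPECTL.SUSPHY and GUSB2PHYCFG.SUSPHY for every USB3
 * and USB2 port, honoring the dis_u3_susphy_quirk/dis_u2_susphy_quirk flags.
 */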
108 void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
109 {
110 u32 reg;
111 int i;
112
113 for (i = 0; i < dwc->num_usb3_ports; i++) {
114 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(i));
115 if (enable && !dwc->dis_u3_susphy_quirk)
116 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
117 else
118 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
119
120 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(i), reg);
121 }
122
123 for (i = 0; i < dwc->num_usb2_ports; i++) {
124 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
125 if (enable && !dwc->dis_u2_susphy_quirk)
126 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
127 else
128 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
129
130 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
131 }
132 }
133
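/**
 * dwc3_set_prtcap - Set the port capability direction
 * @dwc: pointer to our context structure
 * @mode: one of the DWC3_GCTL_PRTCAP_* values
 *
 * Programs GCTL.PrtCapDir and records @mode as the current DRD role.
 */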
134 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
135 {
136 u32 reg;
137
138 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
139 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
140 reg |= DWC3_GCTL_PRTCAPDIR(mode);
141 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
142
143 dwc->current_dr_role = mode;
144 }
145
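/*
 * __dwc3_set_mode - Work function performing the actual DRD role switch.
 *
 * Tears down the current role (host, device or OTG), optionally issues
 * GCTL.CoreSoftReset, programs the new port capability direction and then
 * initializes the desired role.
 */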
146 static void __dwc3_set_mode(struct work_struct *work)
147 {
148 struct dwc3 *dwc = work_to_dwc(work);
149 unsigned long flags;
150 int ret;
151 u32 reg;
152 u32 desired_dr_role;
153 int i;
154
155 mutex_lock(&dwc->mutex);
156 spin_lock_irqsave(&dwc->lock, flags);
157 desired_dr_role = dwc->desired_dr_role;
158 spin_unlock_irqrestore(&dwc->lock, flags);
159
160 pm_runtime_get_sync(dwc->dev);
161
162 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
163 dwc3_otg_update(dwc, 0);
164
165 if (!desired_dr_role)
166 goto out;
167
168 if (desired_dr_role == dwc->current_dr_role)
169 goto out;
170
171 if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
172 goto out;
173
174 switch (dwc->current_dr_role) {
175 case DWC3_GCTL_PRTCAP_HOST:
176 dwc3_host_exit(dwc);
177 break;
178 case DWC3_GCTL_PRTCAP_DEVICE:
179 dwc3_gadget_exit(dwc);
180 dwc3_event_buffers_cleanup(dwc);
181 break;
182 case DWC3_GCTL_PRTCAP_OTG:
183 dwc3_otg_exit(dwc);
184 spin_lock_irqsave(&dwc->lock, flags);
185 dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
186 spin_unlock_irqrestore(&dwc->lock, flags);
187 dwc3_otg_update(dwc, 1);
188 break;
189 default:
190 break;
191 }
192
193 /*
194 * When current_dr_role is not set, there's no role switching.
195 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
196 */
197 if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
198 DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
199 desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
200 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
201 reg |= DWC3_GCTL_CORESOFTRESET;
202 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
203
204 /*
205 * Wait for internal clocks to synchronize. DWC_usb31 and
206 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
207 * keep it consistent across different IPs, let's wait up to
208 * 100ms before clearing GCTL.CORESOFTRESET.
209 */
210 msleep(100);
211
212 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
213 reg &= ~DWC3_GCTL_CORESOFTRESET;
214 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
215 }
216
217 spin_lock_irqsave(&dwc->lock, flags);
218
219 dwc3_set_prtcap(dwc, desired_dr_role);
220
221 spin_unlock_irqrestore(&dwc->lock, flags);
222
223 switch (desired_dr_role) {
224 case DWC3_GCTL_PRTCAP_HOST:
225 ret = dwc3_host_init(dwc);
226 if (ret) {
227 dev_err(dwc->dev, "failed to initialize host\n");
228 } else {
229 if (dwc->usb2_phy)
230 otg_set_vbus(dwc->usb2_phy->otg, true);
231
232 for (i = 0; i < dwc->num_usb2_ports; i++)
233 phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
234 for (i = 0; i < dwc->num_usb3_ports; i++)
235 phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);
236
237 if (dwc->dis_split_quirk) {
238 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
239 reg |= DWC3_GUCTL3_SPLITDISABLE;
240 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
241 }
242 }
243 break;
244 case DWC3_GCTL_PRTCAP_DEVICE:
245 dwc3_core_soft_reset(dwc);
246
247 dwc3_event_buffers_setup(dwc);
248
249 if (dwc->usb2_phy)
250 otg_set_vbus(dwc->usb2_phy->otg, false);
251 phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
252 phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);
253
254 ret = dwc3_gadget_init(dwc);
255 if (ret)
256 dev_err(dwc->dev, "failed to initialize peripheral\n");
257 break;
258 case DWC3_GCTL_PRTCAP_OTG:
259 dwc3_otg_init(dwc);
260 dwc3_otg_update(dwc, 0);
261 break;
262 default:
263 break;
264 }
265
266 out:
267 pm_runtime_mark_last_busy(dwc->dev);
268 pm_runtime_put_autosuspend(dwc->dev);
269 mutex_unlock(&dwc->mutex);
270 }
271
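/**
 * dwc3_set_mode - Request a deferred DRD role switch
 * @dwc: pointer to our context structure
 * @mode: desired DWC3_GCTL_PRTCAP_* role
 *
 * Only valid when dr_mode is OTG; the switch itself runs from the drd_work
 * worker in __dwc3_set_mode().
 */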
272 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
273 {
274 unsigned long flags;
275
276 if (dwc->dr_mode != USB_DR_MODE_OTG)
277 return;
278
279 spin_lock_irqsave(&dwc->lock, flags);
280 dwc->desired_dr_role = mode;
281 spin_unlock_irqrestore(&dwc->lock, flags);
282
283 queue_work(system_freezable_wq, &dwc->drd_work);
284 }
285
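/*
 * dwc3_core_fifo_space - Query the available FIFO space reported by
 * GDBGFIFOSPACE for the given endpoint number and FIFO @type.
 */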
286 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
287 {
288 struct dwc3 *dwc = dep->dwc;
289 u32 reg;
290
291 dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
292 DWC3_GDBGFIFOSPACE_NUM(dep->number) |
293 DWC3_GDBGFIFOSPACE_TYPE(type));
294
295 reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);
296
297 return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
298 }
299
300 /**
301 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
302 * @dwc: pointer to our context structure
303 */
304 int dwc3_core_soft_reset(struct dwc3 *dwc)
305 {
306 u32 reg;
307 int retries = 1000;
308
309 /*
310 * We're resetting only the device side because, if we're in host mode,
311 * XHCI driver will reset the host block. If dwc3 was configured for
312 * host-only mode, then we can return early.
313 */
314 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
315 return 0;
316
317 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
318 reg |= DWC3_DCTL_CSFTRST;
319 reg &= ~DWC3_DCTL_RUN_STOP;
320 dwc3_gadget_dctl_write_safe(dwc, reg);
321
322 /*
323 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFTRST bit
324 * is cleared only after all the clocks are synchronized. This can
325 * take a little more than 50ms. Set the polling rate at 20ms
326 * for 10 times instead.
327 */
328 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
329 retries = 10;
330
331 do {
332 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
333 if (!(reg & DWC3_DCTL_CSFTRST))
334 goto done;
335
336 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
337 msleep(20);
338 else
339 udelay(1);
340 } while (--retries);
341
342 dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
343 return -ETIMEDOUT;
344
345 done:
346 /*
347 * For DWC_usb31 controller 1.80a and prior, once the DCTL.CSFTRST bit
348 * is cleared, we must wait at least 50ms before accessing the PHY
349 * domain (synchronization delay).
350 */
351 if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
352 msleep(50);
353
354 return 0;
355 }
356
357 /*
358 * dwc3_frame_length_adjustment - Adjusts frame length if required
359 * @dwc: Pointer to our controller context structure
360 */
361 static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
362 {
363 u32 reg;
364 u32 dft;
365
366 if (DWC3_VER_IS_PRIOR(DWC3, 250A))
367 return;
368
369 if (dwc->fladj == 0)
370 return;
371
372 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
373 dft = reg & DWC3_GFLADJ_30MHZ_MASK;
374 if (dft != dwc->fladj) {
375 reg &= ~DWC3_GFLADJ_30MHZ_MASK;
376 reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
377 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
378 }
379 }
380
381 /**
382 * dwc3_ref_clk_period - Reference clock period configuration
383 * Default reference clock period depends on hardware
384 * configuration. For systems with a reference clock that differs
385 * from the default, this will set the clock period in the DWC3_GUCTL
386 * register.
387 * @dwc: Pointer to our controller context structure
388 */
389 static void dwc3_ref_clk_period(struct dwc3 *dwc)
390 {
391 unsigned long period;
392 unsigned long fladj;
393 unsigned long decr;
394 unsigned long rate;
395 u32 reg;
396
397 if (dwc->ref_clk) {
398 rate = clk_get_rate(dwc->ref_clk);
399 if (!rate)
400 return;
401 period = NSEC_PER_SEC / rate;
402 } else if (dwc->ref_clk_per) {
403 period = dwc->ref_clk_per;
404 rate = NSEC_PER_SEC / period;
405 } else {
406 return;
407 }
408
409 reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
410 reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
411 reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
412 dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
413
414 if (DWC3_VER_IS_PRIOR(DWC3, 250A))
415 return;
416
417 /*
418 * The calculation below is
419 *
420 * 125000 * (NSEC_PER_SEC / (rate * period) - 1)
421 *
422 * but rearranged for fixed-point arithmetic. The division must be
423 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
424 * neither does rate * period).
425 *
426 * Note that rate * period ~= NSEC_PER_SEC, minus the number of
427 * nanoseconds of error caused by the truncation which happened during
428 * the division when calculating rate or period (whichever one was
429 * derived from the other). We first calculate the relative error, then
430 * scale it to units of 8 ppm.
431 */
432 fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
433 fladj -= 125000;
434
435 /*
436 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
437 */
438 decr = 480000000 / rate;
439
440 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
441 reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
442 & ~DWC3_GFLADJ_240MHZDECR
443 & ~DWC3_GFLADJ_240MHZDECR_PLS1;
444 reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
445 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
446 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);
447
448 if (dwc->gfladj_refclk_lpm_sel)
449 reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
450
451 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
452 }
453
454 /**
455 * dwc3_free_one_event_buffer - Frees one event buffer
456 * @dwc: Pointer to our controller context structure
457 * @evt: Pointer to event buffer to be freed
458 */
459 static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
460 struct dwc3_event_buffer *evt)
461 {
462 dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
463 }
464
465 /**
466 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
467 * @dwc: Pointer to our controller context structure
468 * @length: size of the event buffer
469 *
470 * Returns a pointer to the allocated event buffer structure on success
471 * otherwise ERR_PTR(errno).
472 */
473 static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
474 unsigned int length)
475 {
476 struct dwc3_event_buffer *evt;
477
478 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
479 if (!evt)
480 return ERR_PTR(-ENOMEM);
481
482 evt->dwc = dwc;
483 evt->length = length;
484 evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
485 if (!evt->cache)
486 return ERR_PTR(-ENOMEM);
487
488 evt->buf = dma_alloc_coherent(dwc->sysdev, length,
489 &evt->dma, GFP_KERNEL);
490 if (!evt->buf)
491 return ERR_PTR(-ENOMEM);
492
493 return evt;
494 }
495
496 /**
497 * dwc3_free_event_buffers - frees all allocated event buffers
498 * @dwc: Pointer to our controller context structure
499 */
500 static void dwc3_free_event_buffers(struct dwc3 *dwc)
501 {
502 struct dwc3_event_buffer *evt;
503
504 evt = dwc->ev_buf;
505 if (evt)
506 dwc3_free_one_event_buffer(dwc, evt);
507 }
508
509 /**
510 * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
511 * @dwc: pointer to our controller context structure
512 * @length: size of event buffer
513 *
514 * Returns 0 on success otherwise negative errno. In the error case, dwc
515 * may contain some buffers allocated but not all which were requested.
516 */
517 static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
518 {
519 struct dwc3_event_buffer *evt;
520 unsigned int hw_mode;
521
522 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
523 if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
524 dwc->ev_buf = NULL;
525 return 0;
526 }
527
528 evt = dwc3_alloc_one_event_buffer(dwc, length);
529 if (IS_ERR(evt)) {
530 dev_err(dwc->dev, "can't allocate event buffer\n");
531 return PTR_ERR(evt);
532 }
533 dwc->ev_buf = evt;
534
535 return 0;
536 }
537
538 /**
539 * dwc3_event_buffers_setup - setup our allocated event buffers
540 * @dwc: pointer to our controller context structure
541 *
542 * Returns 0 on success otherwise negative errno.
543 */
544 int dwc3_event_buffers_setup(struct dwc3 *dwc)
545 {
546 struct dwc3_event_buffer *evt;
547 u32 reg;
548
549 if (!dwc->ev_buf)
550 return 0;
551
552 evt = dwc->ev_buf;
553 evt->lpos = 0;
554 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
555 lower_32_bits(evt->dma));
556 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
557 upper_32_bits(evt->dma));
558 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
559 DWC3_GEVNTSIZ_SIZE(evt->length));
560
561 /* Clear any stale event */
562 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
563 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
564 return 0;
565 }
566
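/*
 * dwc3_event_buffers_cleanup - Clear the event buffer registers and any
 * pending event, provided the device controller has actually halted.
 */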
567 void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
568 {
569 struct dwc3_event_buffer *evt;
570 u32 reg;
571
572 if (!dwc->ev_buf)
573 return;
574 /*
575 * Exynos platforms may not be able to access event buffer if the
576 * controller failed to halt on dwc3_core_exit().
577 */
578 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
579 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
580 return;
581
582 evt = dwc->ev_buf;
583
584 evt->lpos = 0;
585
586 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
587 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
588 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
589 | DWC3_GEVNTSIZ_SIZE(0));
590
591 /* Clear any stale event */
592 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
593 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
594 }
595
596 static void dwc3_core_num_eps(struct dwc3 *dwc)
597 {
598 struct dwc3_hwparams *parms = &dwc->hwparams;
599
600 dwc->num_eps = DWC3_NUM_EPS(parms);
601 }
602
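/* Cache the GHWPARAMS registers so they can be inspected without further register reads. */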
603 static void dwc3_cache_hwparams(struct dwc3 *dwc)
604 {
605 struct dwc3_hwparams *parms = &dwc->hwparams;
606
607 parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
608 parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
609 parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
610 parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
611 parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
612 parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
613 parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
614 parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
615 parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
616
617 if (DWC3_IP_IS(DWC32))
618 parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
619 }
620
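/* Program the GSBUSCFG0 REQINFO field when the glue layer provided a value. */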
621 static void dwc3_config_soc_bus(struct dwc3 *dwc)
622 {
623 if (dwc->gsbuscfg0_reqinfo != DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED) {
624 u32 reg;
625
626 reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
627 reg &= ~DWC3_GSBUSCFG0_REQINFO(~0);
628 reg |= DWC3_GSBUSCFG0_REQINFO(dwc->gsbuscfg0_reqinfo);
629 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
630 }
631 }
632
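/* Register the ULPI interface if the HS PHY interface is (or is configured as) ULPI. */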
633 static int dwc3_core_ulpi_init(struct dwc3 *dwc)
634 {
635 int intf;
636 int ret = 0;
637
638 intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
639
640 if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
641 (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
642 dwc->hsphy_interface &&
643 !strncmp(dwc->hsphy_interface, "ulpi", 4)))
644 ret = dwc3_ulpi_init(dwc);
645
646 return ret;
647 }
648
649 static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
650 {
651 u32 reg;
652
653 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(index));
654
655 /*
656 * Make sure UX_EXIT_PX is cleared as that causes issues with some
657 * PHYs. Also, this bit is not supposed to be used in normal operation.
658 */
659 reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
660
661 /*
662 * Above DWC_usb3.0 1.94a, it is recommended to set
663 * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
664 * So default value will be '0' when the core is reset. Application
665 * needs to set it to '1' after the core initialization is completed.
666 *
667 * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
668 * cleared after power-on reset, and it can be set after core
669 * initialization.
670 */
671 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
672
673 if (dwc->u2ss_inp3_quirk)
674 reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
675
676 if (dwc->dis_rxdet_inp3_quirk)
677 reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;
678
679 if (dwc->req_p1p2p3_quirk)
680 reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;
681
682 if (dwc->del_p1p2p3_quirk)
683 reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;
684
685 if (dwc->del_phy_power_chg_quirk)
686 reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;
687
688 if (dwc->lfps_filter_quirk)
689 reg |= DWC3_GUSB3PIPECTL_LFPSFILT;
690
691 if (dwc->rx_detect_poll_quirk)
692 reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;
693
694 if (dwc->tx_de_emphasis_quirk)
695 reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
696
697 if (dwc->dis_del_phy_power_chg_quirk)
698 reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
699
700 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(index), reg);
701
702 return 0;
703 }
704
705 static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
706 {
707 u32 reg;
708
709 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(index));
710
711 /* Select the HS PHY interface */
712 switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
713 case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
714 if (dwc->hsphy_interface &&
715 !strncmp(dwc->hsphy_interface, "utmi", 4)) {
716 reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
717 break;
718 } else if (dwc->hsphy_interface &&
719 !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
720 reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
721 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);
722 } else {
723 /* Relying on default value. */
724 if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
725 break;
726 }
727 fallthrough;
728 case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
729 default:
730 break;
731 }
732
733 switch (dwc->hsphy_mode) {
734 case USBPHY_INTERFACE_MODE_UTMI:
735 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
736 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
737 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
738 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
739 break;
740 case USBPHY_INTERFACE_MODE_UTMIW:
741 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
742 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
743 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
744 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
745 break;
746 default:
747 break;
748 }
749
750 /*
751 * Above DWC_usb3.0 1.94a, it is recommended to set
752 * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
753 * So default value will be '0' when the core is reset. Application
754 * needs to set it to '1' after the core initialization is completed.
755 *
756 * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
757 * after power-on reset, and it can be set after core initialization.
758 */
759 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
760
761 if (dwc->dis_enblslpm_quirk)
762 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
763 else
764 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
765
766 if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
767 reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;
768
769 /*
770 * Some ULPI USB PHYs do not support an internal VBUS supply, and driving
771 * the CPEN pin requires configuring the ULPI DRVVBUSEXTERNAL bit of the
772 * OTG_CTRL register. The controller configures the USB2 PHY
773 * ULPIEXTVBUSDRV bit[17] of the GUSB2PHYCFG register to drive VBUS
774 * with an external supply.
775 */
776 if (dwc->ulpi_ext_vbus_drv)
777 reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;
778
779 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);
780
781 return 0;
782 }
783
784 /**
785 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
786 * @dwc: Pointer to our controller context structure
787 *
788 * Returns 0 on success. The USB PHY interfaces are configured but not
789 * initialized. The PHY interfaces and the PHYs get initialized together with
790 * the core in dwc3_core_init.
791 */
792 static int dwc3_phy_setup(struct dwc3 *dwc)
793 {
794 int i;
795 int ret;
796
797 for (i = 0; i < dwc->num_usb3_ports; i++) {
798 ret = dwc3_ss_phy_setup(dwc, i);
799 if (ret)
800 return ret;
801 }
802
803 for (i = 0; i < dwc->num_usb2_ports; i++) {
804 ret = dwc3_hs_phy_setup(dwc, i);
805 if (ret)
806 return ret;
807 }
808
809 return 0;
810 }
811
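/*
 * dwc3_phy_init - Initialize the legacy USB PHYs and every generic USB2/USB3
 * PHY; on failure, any PHYs initialized so far are unwound.
 */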
812 static int dwc3_phy_init(struct dwc3 *dwc)
813 {
814 int ret;
815 int i;
816 int j;
817
818 usb_phy_init(dwc->usb2_phy);
819 usb_phy_init(dwc->usb3_phy);
820
821 for (i = 0; i < dwc->num_usb2_ports; i++) {
822 ret = phy_init(dwc->usb2_generic_phy[i]);
823 if (ret < 0)
824 goto err_exit_usb2_phy;
825 }
826
827 for (j = 0; j < dwc->num_usb3_ports; j++) {
828 ret = phy_init(dwc->usb3_generic_phy[j]);
829 if (ret < 0)
830 goto err_exit_usb3_phy;
831 }
832
833 return 0;
834
835 err_exit_usb3_phy:
836 while (--j >= 0)
837 phy_exit(dwc->usb3_generic_phy[j]);
838
839 err_exit_usb2_phy:
840 while (--i >= 0)
841 phy_exit(dwc->usb2_generic_phy[i]);
842
843 usb_phy_shutdown(dwc->usb3_phy);
844 usb_phy_shutdown(dwc->usb2_phy);
845
846 return ret;
847 }
848
849 static void dwc3_phy_exit(struct dwc3 *dwc)
850 {
851 int i;
852
853 for (i = 0; i < dwc->num_usb3_ports; i++)
854 phy_exit(dwc->usb3_generic_phy[i]);
855
856 for (i = 0; i < dwc->num_usb2_ports; i++)
857 phy_exit(dwc->usb2_generic_phy[i]);
858
859 usb_phy_shutdown(dwc->usb3_phy);
860 usb_phy_shutdown(dwc->usb2_phy);
861 }
862
863 static int dwc3_phy_power_on(struct dwc3 *dwc)
864 {
865 int ret;
866 int i;
867 int j;
868
869 usb_phy_set_suspend(dwc->usb2_phy, 0);
870 usb_phy_set_suspend(dwc->usb3_phy, 0);
871
872 for (i = 0; i < dwc->num_usb2_ports; i++) {
873 ret = phy_power_on(dwc->usb2_generic_phy[i]);
874 if (ret < 0)
875 goto err_power_off_usb2_phy;
876 }
877
878 for (j = 0; j < dwc->num_usb3_ports; j++) {
879 ret = phy_power_on(dwc->usb3_generic_phy[j]);
880 if (ret < 0)
881 goto err_power_off_usb3_phy;
882 }
883
884 return 0;
885
886 err_power_off_usb3_phy:
887 while (--j >= 0)
888 phy_power_off(dwc->usb3_generic_phy[j]);
889
890 err_power_off_usb2_phy:
891 while (--i >= 0)
892 phy_power_off(dwc->usb2_generic_phy[i]);
893
894 usb_phy_set_suspend(dwc->usb3_phy, 1);
895 usb_phy_set_suspend(dwc->usb2_phy, 1);
896
897 return ret;
898 }
899
900 static void dwc3_phy_power_off(struct dwc3 *dwc)
901 {
902 int i;
903
904 for (i = 0; i < dwc->num_usb3_ports; i++)
905 phy_power_off(dwc->usb3_generic_phy[i]);
906
907 for (i = 0; i < dwc->num_usb2_ports; i++)
908 phy_power_off(dwc->usb2_generic_phy[i]);
909
910 usb_phy_set_suspend(dwc->usb3_phy, 1);
911 usb_phy_set_suspend(dwc->usb2_phy, 1);
912 }
913
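/* Enable the bus, ref, suspend, UTMI and pipe clocks, unwinding on failure. */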
914 static int dwc3_clk_enable(struct dwc3 *dwc)
915 {
916 int ret;
917
918 ret = clk_prepare_enable(dwc->bus_clk);
919 if (ret)
920 return ret;
921
922 ret = clk_prepare_enable(dwc->ref_clk);
923 if (ret)
924 goto disable_bus_clk;
925
926 ret = clk_prepare_enable(dwc->susp_clk);
927 if (ret)
928 goto disable_ref_clk;
929
930 ret = clk_prepare_enable(dwc->utmi_clk);
931 if (ret)
932 goto disable_susp_clk;
933
934 ret = clk_prepare_enable(dwc->pipe_clk);
935 if (ret)
936 goto disable_utmi_clk;
937
938 return 0;
939
940 disable_utmi_clk:
941 clk_disable_unprepare(dwc->utmi_clk);
942 disable_susp_clk:
943 clk_disable_unprepare(dwc->susp_clk);
944 disable_ref_clk:
945 clk_disable_unprepare(dwc->ref_clk);
946 disable_bus_clk:
947 clk_disable_unprepare(dwc->bus_clk);
948 return ret;
949 }
950
951 static void dwc3_clk_disable(struct dwc3 *dwc)
952 {
953 clk_disable_unprepare(dwc->pipe_clk);
954 clk_disable_unprepare(dwc->utmi_clk);
955 clk_disable_unprepare(dwc->susp_clk);
956 clk_disable_unprepare(dwc->ref_clk);
957 clk_disable_unprepare(dwc->bus_clk);
958 }
959
960 static void dwc3_core_exit(struct dwc3 *dwc)
961 {
962 dwc3_event_buffers_cleanup(dwc);
963 dwc3_phy_power_off(dwc);
964 dwc3_phy_exit(dwc);
965 dwc3_clk_disable(dwc);
966 reset_control_assert(dwc->reset);
967 }
968
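/*
 * dwc3_core_is_valid - Read GSNPSID to confirm this is a DWC_usb3/31/32 core
 * and cache the IP and revision information.
 */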
969 static bool dwc3_core_is_valid(struct dwc3 *dwc)
970 {
971 u32 reg;
972
973 reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
974 dwc->ip = DWC3_GSNPS_ID(reg);
975
976 /* This should read as U3 followed by revision number */
977 if (DWC3_IP_IS(DWC3)) {
978 dwc->revision = reg;
979 } else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
980 dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
981 dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
982 } else {
983 return false;
984 }
985
986 return true;
987 }
988
989 static void dwc3_core_setup_global_control(struct dwc3 *dwc)
990 {
991 unsigned int power_opt;
992 unsigned int hw_mode;
993 u32 reg;
994
995 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
996 reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
997 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
998 power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
999
1000 switch (power_opt) {
1001 case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
1002 /**
1003 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
1004 * issue which would cause xHCI compliance tests to fail.
1005 *
1006 * Because of that we cannot enable clock gating on such
1007 * configurations.
1008 *
1009 * Refers to:
1010 *
1011 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
1012 * SOF/ITP Mode Used
1013 */
1014 if ((dwc->dr_mode == USB_DR_MODE_HOST ||
1015 dwc->dr_mode == USB_DR_MODE_OTG) &&
1016 DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
1017 reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
1018 else
1019 reg &= ~DWC3_GCTL_DSBLCLKGTNG;
1020 break;
1021 case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
1022 /*
1023 * REVISIT Enabling this bit so that host-mode hibernation
1024 * will work. Device-mode hibernation is not yet implemented.
1025 */
1026 reg |= DWC3_GCTL_GBLHIBERNATIONEN;
1027 break;
1028 default:
1029 /* nothing */
1030 break;
1031 }
1032
1033 /*
1034 * This is a workaround for STAR#4846132, which only affects
1035 * DWC_usb31 version 2.00a operating in host mode.
1036 *
1037 * There is a problem in DWC_usb31 version 2.00a operating
1038 * in host mode that can cause a CSR read timeout when a CSR
1039 * read coincides with RAM clock gating entry. Disable clock
1040 * gating, sacrificing power consumption, to keep normal
1041 * operation.
1042 */
1043 if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
1044 hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
1045 reg |= DWC3_GCTL_DSBLCLKGTNG;
1046
1047 /* check if current dwc3 is on simulation board */
1048 if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
1049 dev_info(dwc->dev, "Running with FPGA optimizations\n");
1050 dwc->is_fpga = true;
1051 }
1052
1053 WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
1054 "disable_scramble cannot be used on non-FPGA builds\n");
1055
1056 if (dwc->disable_scramble_quirk && dwc->is_fpga)
1057 reg |= DWC3_GCTL_DISSCRAMBLE;
1058 else
1059 reg &= ~DWC3_GCTL_DISSCRAMBLE;
1060
1061 if (dwc->u2exit_lfps_quirk)
1062 reg |= DWC3_GCTL_U2EXIT_LFPS;
1063
1064 /*
1065 * WORKAROUND: DWC3 revisions <1.90a have a bug
1066 * where the device can fail to connect at SuperSpeed
1067 * and falls back to high-speed mode which causes
1068 * the device to enter a Connect/Disconnect loop
1069 */
1070 if (DWC3_VER_IS_PRIOR(DWC3, 190A))
1071 reg |= DWC3_GCTL_U2RSTECN;
1072
1073 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1074 }
1075
1076 static int dwc3_core_get_phy(struct dwc3 *dwc);
1077 static int dwc3_core_ulpi_init(struct dwc3 *dwc);
1078
1079 /* set global incr burst type configuration registers */
1080 static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
1081 {
1082 struct device *dev = dwc->dev;
1083 /* incrx_mode : for INCR burst type. */
1084 bool incrx_mode;
1085 /* incrx_size : for size of INCRX burst. */
1086 u32 incrx_size;
1087 u32 *vals;
1088 u32 cfg;
1089 int ntype;
1090 int ret;
1091 int i;
1092
1093 cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
1094
1095 /*
1096 * Handle property "snps,incr-burst-type-adjustment".
1097 * Get the number of values from this property:
1098 * result <= 0, means this property is not supported.
1099 * result = 1, means INCRx burst mode supported.
1100 * result > 1, means undefined length burst mode supported.
1101 */
1102 ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
1103 if (ntype <= 0)
1104 return;
1105
1106 vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
1107 if (!vals)
1108 return;
1109
1110 /* Get INCR burst type, and parse it */
1111 ret = device_property_read_u32_array(dev,
1112 "snps,incr-burst-type-adjustment", vals, ntype);
1113 if (ret) {
1114 kfree(vals);
1115 dev_err(dev, "failed to read property\n");
1116 return;
1117 }
1118
1119 incrx_size = *vals;
1120
1121 if (ntype > 1) {
1122 /* INCRX (undefined length) burst mode */
1123 incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
1124 for (i = 1; i < ntype; i++) {
1125 if (vals[i] > incrx_size)
1126 incrx_size = vals[i];
1127 }
1128 } else {
1129 /* INCRX burst mode */
1130 incrx_mode = INCRX_BURST_MODE;
1131 }
1132
1133 kfree(vals);
1134
1135 /* Enable Undefined Length INCR Burst and Enable INCRx Burst */
1136 cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
1137 if (incrx_mode)
1138 cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
1139 switch (incrx_size) {
1140 case 256:
1141 cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
1142 break;
1143 case 128:
1144 cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
1145 break;
1146 case 64:
1147 cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
1148 break;
1149 case 32:
1150 cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
1151 break;
1152 case 16:
1153 cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
1154 break;
1155 case 8:
1156 cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
1157 break;
1158 case 4:
1159 cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
1160 break;
1161 case 1:
1162 break;
1163 default:
1164 dev_err(dev, "Invalid property\n");
1165 break;
1166 }
1167
1168 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
1169 }
1170
1171 static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
1172 {
1173 u32 scale;
1174 u32 reg;
1175
1176 if (!dwc->susp_clk)
1177 return;
1178
1179 /*
1180 * The power down scale field specifies how many suspend_clk
1181 * periods fit into a 16KHz clock period. When performing
1182 * the division, round up the remainder.
1183 *
1184 * The power down scale value is calculated using the fastest
1185 * frequency of the suspend_clk. If it isn't fixed (but within
1186 * the accuracy requirement), the driver may not know the max
1187 * rate of the suspend_clk, so only update the power down scale
1188 * if the default is less than the calculated value from
1189 * clk_get_rate() or if the default is questionably high
1190 * (3x or more) to be within the requirement.
1191 */
1192 scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
1193 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1194 if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
1195 (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
1196 reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
1197 reg |= DWC3_GCTL_PWRDNSCALE(scale);
1198 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1199 }
1200 }
1201
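/*
 * dwc3_config_threshold - Program the global RX/TX packet threshold and max
 * burst settings (periodic and non-periodic) from the parsed properties.
 */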
1202 static void dwc3_config_threshold(struct dwc3 *dwc)
1203 {
1204 u32 reg;
1205 u8 rx_thr_num;
1206 u8 rx_maxburst;
1207 u8 tx_thr_num;
1208 u8 tx_maxburst;
1209
1210 /*
1211 * Must config both number of packets and max burst settings to enable
1212 * RX and/or TX threshold.
1213 */
1214 if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
1215 rx_thr_num = dwc->rx_thr_num_pkt_prd;
1216 rx_maxburst = dwc->rx_max_burst_prd;
1217 tx_thr_num = dwc->tx_thr_num_pkt_prd;
1218 tx_maxburst = dwc->tx_max_burst_prd;
1219
1220 if (rx_thr_num && rx_maxburst) {
1221 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1222 reg |= DWC31_RXTHRNUMPKTSEL_PRD;
1223
1224 reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
1225 reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
1226
1227 reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
1228 reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
1229
1230 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1231 }
1232
1233 if (tx_thr_num && tx_maxburst) {
1234 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1235 reg |= DWC31_TXTHRNUMPKTSEL_PRD;
1236
1237 reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
1238 reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
1239
1240 reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
1241 reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
1242
1243 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1244 }
1245 }
1246
1247 rx_thr_num = dwc->rx_thr_num_pkt;
1248 rx_maxburst = dwc->rx_max_burst;
1249 tx_thr_num = dwc->tx_thr_num_pkt;
1250 tx_maxburst = dwc->tx_max_burst;
1251
1252 if (DWC3_IP_IS(DWC3)) {
1253 if (rx_thr_num && rx_maxburst) {
1254 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1255 reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
1256
1257 reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
1258 reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
1259
1260 reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
1261 reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
1262
1263 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1264 }
1265
1266 if (tx_thr_num && tx_maxburst) {
1267 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1268 reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
1269
1270 reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
1271 reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
1272
1273 reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
1274 reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
1275
1276 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1277 }
1278 } else {
1279 if (rx_thr_num && rx_maxburst) {
1280 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1281 reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
1282
1283 reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
1284 reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
1285
1286 reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
1287 reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
1288
1289 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1290 }
1291
1292 if (tx_thr_num && tx_maxburst) {
1293 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1294 reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
1295
1296 reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
1297 reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
1298
1299 reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
1300 reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
1301
1302 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1303 }
1304 }
1305 }
1306
1307 /**
1308 * dwc3_core_init - Low-level initialization of DWC3 Core
1309 * @dwc: Pointer to our controller context structure
1310 *
1311 * Returns 0 on success otherwise negative errno.
1312 */
1313 static int dwc3_core_init(struct dwc3 *dwc)
1314 {
1315 unsigned int hw_mode;
1316 u32 reg;
1317 int ret;
1318
1319 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
1320
1321 /*
1322 * Write Linux Version Code to our GUID register so it's easy to figure
1323 * out in which kernel version a bug was found.
1324 */
1325 dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
1326
1327 ret = dwc3_phy_setup(dwc);
1328 if (ret)
1329 return ret;
1330
1331 if (!dwc->ulpi_ready) {
1332 ret = dwc3_core_ulpi_init(dwc);
1333 if (ret) {
1334 if (ret == -ETIMEDOUT) {
1335 dwc3_core_soft_reset(dwc);
1336 ret = -EPROBE_DEFER;
1337 }
1338 return ret;
1339 }
1340 dwc->ulpi_ready = true;
1341 }
1342
1343 if (!dwc->phys_ready) {
1344 ret = dwc3_core_get_phy(dwc);
1345 if (ret)
1346 goto err_exit_ulpi;
1347 dwc->phys_ready = true;
1348 }
1349
1350 ret = dwc3_phy_init(dwc);
1351 if (ret)
1352 goto err_exit_ulpi;
1353
1354 ret = dwc3_core_soft_reset(dwc);
1355 if (ret)
1356 goto err_exit_phy;
1357
1358 dwc3_core_setup_global_control(dwc);
1359 dwc3_core_num_eps(dwc);
1360
1361 /* Set power down scale of suspend_clk */
1362 dwc3_set_power_down_clk_scale(dwc);
1363
1364 /* Adjust Frame Length */
1365 dwc3_frame_length_adjustment(dwc);
1366
1367 /* Adjust Reference Clock Period */
1368 dwc3_ref_clk_period(dwc);
1369
1370 dwc3_set_incr_burst_type(dwc);
1371
1372 dwc3_config_soc_bus(dwc);
1373
1374 ret = dwc3_phy_power_on(dwc);
1375 if (ret)
1376 goto err_exit_phy;
1377
1378 ret = dwc3_event_buffers_setup(dwc);
1379 if (ret) {
1380 dev_err(dwc->dev, "failed to setup event buffers\n");
1381 goto err_power_off_phy;
1382 }
1383
1384 /*
1385 * ENDXFER polling is available on version 3.10a and later of
1386 * the DWC_usb3 controller. It is NOT available in the
1387 * DWC_usb31 controller.
1388 */
1389 if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
1390 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
1391 reg |= DWC3_GUCTL2_RST_ACTBITLATER;
1392 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
1393 }
1394
1395 /*
1396 * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
1397 * only. If the PM TIMER ECM is enabled through GUCTL2[19], the
1398 * link compliance test (TD7.21) may fail. If the ECN is not
1399 * enabled (GUCTL2[19] = 0), the controller will use the old timer
1400 * value (5us), which is still acceptable for the link compliance
1401 * test. Therefore, do not enable PM TIMER ECM in 3.20a by
1402 * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0.
1403 */
1404 if (DWC3_VER_IS(DWC3, 320A)) {
1405 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
1406 reg &= ~DWC3_GUCTL2_LC_TIMER;
1407 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
1408 }
1409
1410 /*
1411 * When configured in HOST mode, after issuing U3/L2 exit the controller
1412 * fails to send a proper CRC checksum in the CRC5 field. Because of this
1413 * behaviour a Transaction Error is generated, resulting in reset and
1414 * re-enumeration of the attached USB device. All of termsel, xcvrsel and
1415 * opmode become 0 at the end of resume. Enabling bit 10 of GUCTL1
1416 * will correct this problem. This option is to support certain
1417 * legacy ULPI PHYs.
1418 */
1419 if (dwc->resume_hs_terminations) {
1420 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1421 reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
1422 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1423 }
1424
1425 if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
1426 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1427
1428 /*
1429 * Enable hardware control of sending remote wakeup
1430 * in HS when the device is in the L1 state.
1431 */
1432 if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
1433 reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
1434
1435 /*
1436 * Decouple USB 2.0 L1 & L2 events which will allow for
1437 * gadget driver to only receive U3/L2 suspend & wakeup
1438 * events and prevent the more frequent L1 LPM transitions
1439 * from interrupting the driver.
1440 */
1441 if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
1442 reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;
1443
1444 if (dwc->dis_tx_ipgap_linecheck_quirk)
1445 reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
1446
1447 if (dwc->parkmode_disable_ss_quirk)
1448 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
1449
1450 if (dwc->parkmode_disable_hs_quirk)
1451 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;
1452
1453 if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY)) {
1454 if (dwc->maximum_speed == USB_SPEED_FULL ||
1455 dwc->maximum_speed == USB_SPEED_HIGH)
1456 reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
1457 else
1458 reg &= ~DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
1459 }
1460
1461 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1462 }
1463
1464 dwc3_config_threshold(dwc);
1465
1466 /*
1467 * Modify this for all supported Super Speed ports when
1468 * multiport support is added.
1469 */
1470 if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
1471 (DWC3_IP_IS(DWC31)) &&
1472 dwc->maximum_speed == USB_SPEED_SUPER) {
1473 reg = dwc3_readl(dwc->regs, DWC3_LLUCTL);
1474 reg |= DWC3_LLUCTL_FORCE_GEN1;
1475 dwc3_writel(dwc->regs, DWC3_LLUCTL, reg);
1476 }
1477
1478 return 0;
1479
1480 err_power_off_phy:
1481 dwc3_phy_power_off(dwc);
1482 err_exit_phy:
1483 dwc3_phy_exit(dwc);
1484 err_exit_ulpi:
1485 dwc3_ulpi_exit(dwc);
1486
1487 return ret;
1488 }
1489
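/*
 * dwc3_core_get_phy - Look up the legacy USB PHYs and the per-port generic
 * USB2/USB3 PHYs; PHYs that are simply not present are treated as optional.
 */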
1490 static int dwc3_core_get_phy(struct dwc3 *dwc)
1491 {
1492 struct device *dev = dwc->dev;
1493 struct device_node *node = dev->of_node;
1494 char phy_name[9];
1495 int ret;
1496 u8 i;
1497
1498 if (node) {
1499 dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
1500 dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
1501 } else {
1502 dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
1503 dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
1504 }
1505
1506 if (IS_ERR(dwc->usb2_phy)) {
1507 ret = PTR_ERR(dwc->usb2_phy);
1508 if (ret == -ENXIO || ret == -ENODEV)
1509 dwc->usb2_phy = NULL;
1510 else
1511 return dev_err_probe(dev, ret, "no usb2 phy configured\n");
1512 }
1513
1514 if (IS_ERR(dwc->usb3_phy)) {
1515 ret = PTR_ERR(dwc->usb3_phy);
1516 if (ret == -ENXIO || ret == -ENODEV)
1517 dwc->usb3_phy = NULL;
1518 else
1519 return dev_err_probe(dev, ret, "no usb3 phy configured\n");
1520 }
1521
1522 for (i = 0; i < dwc->num_usb2_ports; i++) {
1523 if (dwc->num_usb2_ports == 1)
1524 snprintf(phy_name, sizeof(phy_name), "usb2-phy");
1525 else
1526 snprintf(phy_name, sizeof(phy_name), "usb2-%u", i);
1527
1528 dwc->usb2_generic_phy[i] = devm_phy_get(dev, phy_name);
1529 if (IS_ERR(dwc->usb2_generic_phy[i])) {
1530 ret = PTR_ERR(dwc->usb2_generic_phy[i]);
1531 if (ret == -ENOSYS || ret == -ENODEV)
1532 dwc->usb2_generic_phy[i] = NULL;
1533 else
1534 return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
1535 phy_name);
1536 }
1537 }
1538
1539 for (i = 0; i < dwc->num_usb3_ports; i++) {
1540 if (dwc->num_usb3_ports == 1)
1541 snprintf(phy_name, sizeof(phy_name), "usb3-phy");
1542 else
1543 snprintf(phy_name, sizeof(phy_name), "usb3-%u", i);
1544
1545 dwc->usb3_generic_phy[i] = devm_phy_get(dev, phy_name);
1546 if (IS_ERR(dwc->usb3_generic_phy[i])) {
1547 ret = PTR_ERR(dwc->usb3_generic_phy[i]);
1548 if (ret == -ENOSYS || ret == -ENODEV)
1549 dwc->usb3_generic_phy[i] = NULL;
1550 else
1551 return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
1552 phy_name);
1553 }
1554 }
1555
1556 return 0;
1557 }
1558
1559 static int dwc3_core_init_mode(struct dwc3 *dwc)
1560 {
1561 struct device *dev = dwc->dev;
1562 int ret;
1563 int i;
1564
1565 switch (dwc->dr_mode) {
1566 case USB_DR_MODE_PERIPHERAL:
1567 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
1568
1569 if (dwc->usb2_phy)
1570 otg_set_vbus(dwc->usb2_phy->otg, false);
1571 phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
1572 phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);
1573
1574 ret = dwc3_gadget_init(dwc);
1575 if (ret)
1576 return dev_err_probe(dev, ret, "failed to initialize gadget\n");
1577 break;
1578 case USB_DR_MODE_HOST:
1579 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
1580
1581 if (dwc->usb2_phy)
1582 otg_set_vbus(dwc->usb2_phy->otg, true);
1583 for (i = 0; i < dwc->num_usb2_ports; i++)
1584 phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
1585 for (i = 0; i < dwc->num_usb3_ports; i++)
1586 phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);
1587
1588 ret = dwc3_host_init(dwc);
1589 if (ret)
1590 return dev_err_probe(dev, ret, "failed to initialize host\n");
1591 break;
1592 case USB_DR_MODE_OTG:
1593 INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
1594 ret = dwc3_drd_init(dwc);
1595 if (ret)
1596 return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
1597 break;
1598 default:
1599 dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
1600 return -EINVAL;
1601 }
1602
1603 return 0;
1604 }
1605
1606 static void dwc3_core_exit_mode(struct dwc3 *dwc)
1607 {
1608 switch (dwc->dr_mode) {
1609 case USB_DR_MODE_PERIPHERAL:
1610 dwc3_gadget_exit(dwc);
1611 break;
1612 case USB_DR_MODE_HOST:
1613 dwc3_host_exit(dwc);
1614 break;
1615 case USB_DR_MODE_OTG:
1616 dwc3_drd_exit(dwc);
1617 break;
1618 default:
1619 /* do nothing */
1620 break;
1621 }
1622
1623 /* de-assert DRVVBUS for HOST and OTG mode */
1624 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
1625 }
1626
1627 static void dwc3_get_software_properties(struct dwc3 *dwc)
1628 {
1629 struct device *tmpdev;
1630 u16 gsbuscfg0_reqinfo;
1631 int ret;
1632
1633 dwc->gsbuscfg0_reqinfo = DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED;
1634
1635 /*
1636 * Iterate over all parent nodes for finding swnode properties
1637 * and non-DT (non-ABI) properties.
1638 */
1639 for (tmpdev = dwc->dev; tmpdev; tmpdev = tmpdev->parent) {
1640 ret = device_property_read_u16(tmpdev,
1641 "snps,gsbuscfg0-reqinfo",
1642 &gsbuscfg0_reqinfo);
1643 if (!ret)
1644 dwc->gsbuscfg0_reqinfo = gsbuscfg0_reqinfo;
1645 }
1646 }
1647
1648 static void dwc3_get_properties(struct dwc3 *dwc)
1649 {
1650 struct device *dev = dwc->dev;
1651 u8 lpm_nyet_threshold;
1652 u8 tx_de_emphasis;
1653 u8 hird_threshold;
1654 u8 rx_thr_num_pkt = 0;
1655 u8 rx_max_burst = 0;
1656 u8 tx_thr_num_pkt = 0;
1657 u8 tx_max_burst = 0;
1658 u8 rx_thr_num_pkt_prd = 0;
1659 u8 rx_max_burst_prd = 0;
1660 u8 tx_thr_num_pkt_prd = 0;
1661 u8 tx_max_burst_prd = 0;
1662 u8 tx_fifo_resize_max_num;
1663 const char *usb_psy_name;
1664 int ret;
1665
1666 /* default to highest possible threshold */
1667 lpm_nyet_threshold = 0xf;
1668
1669 /* default to -3.5dB de-emphasis */
1670 tx_de_emphasis = 1;
1671
1672 /*
1673 * default to assert utmi_sleep_n and use maximum allowed HIRD
1674 * threshold value of 0b1100
1675 */
1676 hird_threshold = 12;
1677
1678 /*
1679 * default to a TXFIFO size large enough to fit 6 max packets. This
1680 * allows for systems with larger bus latencies to have some headroom
1681 * for endpoints that have a large bMaxBurst value.
1682 */
1683 tx_fifo_resize_max_num = 6;
1684
1685 dwc->maximum_speed = usb_get_maximum_speed(dev);
1686 dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
1687 dwc->dr_mode = usb_get_dr_mode(dev);
1688 dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
1689
1690 dwc->sysdev_is_parent = device_property_read_bool(dev,
1691 "linux,sysdev_is_parent");
1692 if (dwc->sysdev_is_parent)
1693 dwc->sysdev = dwc->dev->parent;
1694 else
1695 dwc->sysdev = dwc->dev;
1696
1697 dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
1698
1699 ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
1700 if (ret >= 0) {
1701 dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
1702 if (!dwc->usb_psy)
1703 dev_err(dev, "couldn't get usb power supply\n");
1704 }
1705
1706 dwc->has_lpm_erratum = device_property_read_bool(dev,
1707 "snps,has-lpm-erratum");
1708 device_property_read_u8(dev, "snps,lpm-nyet-threshold",
1709 &lpm_nyet_threshold);
1710 dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
1711 "snps,is-utmi-l1-suspend");
1712 device_property_read_u8(dev, "snps,hird-threshold",
1713 &hird_threshold);
1714 dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
1715 "snps,dis-start-transfer-quirk");
1716 dwc->usb3_lpm_capable = device_property_read_bool(dev,
1717 "snps,usb3_lpm_capable");
1718 dwc->usb2_lpm_disable = device_property_read_bool(dev,
1719 "snps,usb2-lpm-disable");
1720 dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
1721 "snps,usb2-gadget-lpm-disable");
1722 device_property_read_u8(dev, "snps,rx-thr-num-pkt",
1723 &rx_thr_num_pkt);
1724 device_property_read_u8(dev, "snps,rx-max-burst",
1725 &rx_max_burst);
1726 device_property_read_u8(dev, "snps,tx-thr-num-pkt",
1727 &tx_thr_num_pkt);
1728 device_property_read_u8(dev, "snps,tx-max-burst",
1729 &tx_max_burst);
1730 device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
1731 &rx_thr_num_pkt_prd);
1732 device_property_read_u8(dev, "snps,rx-max-burst-prd",
1733 &rx_max_burst_prd);
1734 device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
1735 &tx_thr_num_pkt_prd);
1736 device_property_read_u8(dev, "snps,tx-max-burst-prd",
1737 &tx_max_burst_prd);
1738 dwc->do_fifo_resize = device_property_read_bool(dev,
1739 "tx-fifo-resize");
1740 if (dwc->do_fifo_resize)
1741 device_property_read_u8(dev, "tx-fifo-max-num",
1742 &tx_fifo_resize_max_num);
1743
1744 dwc->disable_scramble_quirk = device_property_read_bool(dev,
1745 "snps,disable_scramble_quirk");
1746 dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
1747 "snps,u2exit_lfps_quirk");
1748 dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
1749 "snps,u2ss_inp3_quirk");
1750 dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
1751 "snps,req_p1p2p3_quirk");
1752 dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
1753 "snps,del_p1p2p3_quirk");
1754 dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
1755 "snps,del_phy_power_chg_quirk");
1756 dwc->lfps_filter_quirk = device_property_read_bool(dev,
1757 "snps,lfps_filter_quirk");
1758 dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
1759 "snps,rx_detect_poll_quirk");
1760 dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
1761 "snps,dis_u3_susphy_quirk");
1762 dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
1763 "snps,dis_u2_susphy_quirk");
1764 dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
1765 "snps,dis_enblslpm_quirk");
1766 dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
1767 "snps,dis-u1-entry-quirk");
1768 dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
1769 "snps,dis-u2-entry-quirk");
1770 dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
1771 "snps,dis_rxdet_inp3_quirk");
1772 dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
1773 "snps,dis-u2-freeclk-exists-quirk");
1774 dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
1775 "snps,dis-del-phy-power-chg-quirk");
1776 dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
1777 "snps,dis-tx-ipgap-linecheck-quirk");
1778 dwc->resume_hs_terminations = device_property_read_bool(dev,
1779 "snps,resume-hs-terminations");
1780 dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev,
1781 "snps,ulpi-ext-vbus-drv");
1782 dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
1783 "snps,parkmode-disable-ss-quirk");
1784 dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev,
1785 "snps,parkmode-disable-hs-quirk");
1786 dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
1787 "snps,gfladj-refclk-lpm-sel-quirk");
1788
1789 dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
1790 "snps,tx_de_emphasis_quirk");
1791 device_property_read_u8(dev, "snps,tx_de_emphasis",
1792 &tx_de_emphasis);
1793 device_property_read_string(dev, "snps,hsphy_interface",
1794 &dwc->hsphy_interface);
1795 device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
1796 &dwc->fladj);
1797 device_property_read_u32(dev, "snps,ref-clock-period-ns",
1798 &dwc->ref_clk_per);
1799
1800 dwc->dis_metastability_quirk = device_property_read_bool(dev,
1801 "snps,dis_metastability_quirk");
1802
1803 dwc->dis_split_quirk = device_property_read_bool(dev,
1804 "snps,dis-split-quirk");
1805
1806 dwc->lpm_nyet_threshold = lpm_nyet_threshold;
1807 dwc->tx_de_emphasis = tx_de_emphasis;
1808
1809 dwc->hird_threshold = hird_threshold;
1810
1811 dwc->rx_thr_num_pkt = rx_thr_num_pkt;
1812 dwc->rx_max_burst = rx_max_burst;
1813
1814 dwc->tx_thr_num_pkt = tx_thr_num_pkt;
1815 dwc->tx_max_burst = tx_max_burst;
1816
1817 dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
1818 dwc->rx_max_burst_prd = rx_max_burst_prd;
1819
1820 dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
1821 dwc->tx_max_burst_prd = tx_max_burst_prd;
1822
1823 dwc->imod_interval = 0;
1824
1825 dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
1826 }
1827
1828 /* check whether the core supports IMOD */
1829 bool dwc3_has_imod(struct dwc3 *dwc)
1830 {
1831 return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
1832 DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
1833 DWC3_IP_IS(DWC32);
1834 }
1835
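/*
 * dwc3_check_params - Validate imod_interval, maximum_speed and max_ssp_rate
 * against the hardware capabilities and fall back to sane defaults.
 */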
1836 static void dwc3_check_params(struct dwc3 *dwc)
1837 {
1838 struct device *dev = dwc->dev;
1839 unsigned int hwparam_gen =
1840 DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
1841
1842 /* Check for proper value of imod_interval */
1843 if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
1844 dev_warn(dwc->dev, "Interrupt moderation not supported\n");
1845 dwc->imod_interval = 0;
1846 }
1847
1848 /*
1849 * Workaround for STAR 9000961433 which affects only version
1850 * 3.00a of the DWC_usb3 core. This prevents the controller
1851 * interrupt from being masked while handling events. IMOD
1852 * allows us to work around this issue. Enable it for the
1853 * affected version.
1854 */
1855 if (!dwc->imod_interval &&
1856 DWC3_VER_IS(DWC3, 300A))
1857 dwc->imod_interval = 1;
1858
1859 /* Check the maximum_speed parameter */
1860 switch (dwc->maximum_speed) {
1861 case USB_SPEED_FULL:
1862 case USB_SPEED_HIGH:
1863 break;
1864 case USB_SPEED_SUPER:
1865 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
1866 dev_warn(dev, "UDC doesn't support Gen 1\n");
1867 break;
1868 case USB_SPEED_SUPER_PLUS:
1869 if ((DWC3_IP_IS(DWC32) &&
1870 hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
1871 (!DWC3_IP_IS(DWC32) &&
1872 hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
1873 dev_warn(dev, "UDC doesn't support SSP\n");
1874 break;
1875 default:
1876 dev_err(dev, "invalid maximum_speed parameter %d\n",
1877 dwc->maximum_speed);
1878 fallthrough;
1879 case USB_SPEED_UNKNOWN:
1880 switch (hwparam_gen) {
1881 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1882 dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1883 break;
1884 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
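/*
 * DWC_usb32 supports dual-lane operation, so a Gen1 PHY can still
 * provide SuperSpeed Plus rates (Gen 1x2); see the max_ssp_rate
 * selection below.
 */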
1885 if (DWC3_IP_IS(DWC32))
1886 dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1887 else
1888 dwc->maximum_speed = USB_SPEED_SUPER;
1889 break;
1890 case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
1891 dwc->maximum_speed = USB_SPEED_HIGH;
1892 break;
1893 default:
1894 dwc->maximum_speed = USB_SPEED_SUPER;
1895 break;
1896 }
1897 break;
1898 }
1899
1900 /*
1901 * Currently the driver does not have visibility into the HW
1902 * parameter to determine the maximum number of lanes the HW supports.
1903 * If the number of lanes is not specified in the device property, then
1904 * set the default to support dual-lane for DWC_usb32 and single-lane
1905 * for DWC_usb31 for super-speed-plus.
1906 */
1907 if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
1908 switch (dwc->max_ssp_rate) {
1909 case USB_SSP_GEN_2x1:
1910 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
1911 dev_warn(dev, "UDC only supports Gen 1\n");
1912 break;
1913 case USB_SSP_GEN_1x2:
1914 case USB_SSP_GEN_2x2:
1915 if (DWC3_IP_IS(DWC31))
1916 dev_warn(dev, "UDC only supports single lane\n");
1917 break;
1918 case USB_SSP_GEN_UNKNOWN:
1919 default:
1920 switch (hwparam_gen) {
1921 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1922 if (DWC3_IP_IS(DWC32))
1923 dwc->max_ssp_rate = USB_SSP_GEN_2x2;
1924 else
1925 dwc->max_ssp_rate = USB_SSP_GEN_2x1;
1926 break;
1927 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
1928 if (DWC3_IP_IS(DWC32))
1929 dwc->max_ssp_rate = USB_SSP_GEN_1x2;
1930 break;
1931 }
1932 break;
1933 }
1934 }
1935 }
1936
1937 static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
1938 {
1939 struct device *dev = dwc->dev;
1940 struct device_node *np_phy;
1941 struct extcon_dev *edev = NULL;
1942 const char *name;
1943
1944 if (device_property_read_bool(dev, "extcon"))
1945 return extcon_get_edev_by_phandle(dev, 0);
1946
1947 /*
1948 * Device tree platforms should get extcon via phandle.
1949 * On ACPI platforms, we get the name from a device property.
1950 * This device property is for kernel internal use only and
1951 * is expected to be set by the glue code.
1952 */
1953 if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
1954 return extcon_get_extcon_dev(name);
1955
1956 /*
1957 * Check explicitly if "usb-role-switch" is used since
1958 * extcon_find_edev_by_node() cannot be used to check the absence of
1959 * an extcon device. In the absence of a device it will always return
1960 * EPROBE_DEFER.
1961 */
1962 if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
1963 device_property_read_bool(dev, "usb-role-switch"))
1964 return NULL;
1965
1966 /*
1967 * Try to get an extcon device from the USB PHY controller's "port"
1968 * node. Check if it has the "port" node first, to avoid printing the
1969 * error message from underlying code, as it's a valid case: extcon
1970 * device (and "port" node) may be missing in case of "usb-role-switch"
1971 * or OTG mode.
1972 */
1973 np_phy = of_parse_phandle(dev->of_node, "phys", 0);
1974 if (of_graph_is_present(np_phy)) {
1975 struct device_node *np_conn;
1976
1977 np_conn = of_graph_get_remote_node(np_phy, -1, -1);
1978 if (np_conn)
1979 edev = extcon_find_edev_by_node(np_conn);
1980 of_node_put(np_conn);
1981 }
1982 of_node_put(np_phy);
1983
1984 return edev;
1985 }
1986
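/*
 * dwc3_get_clocks - look up the optional core clocks on DT platforms.
 *
 * Purely illustrative sketch of the clock wiring this expects (the clock
 * provider and specifiers below are made up; the names match the lookups
 * in the function and their legacy fallbacks):
 *
 *	clocks = <&cru ACLK_USB>, <&cru CLK_REF>, <&cru CLK_SUSPEND>;
 *	clock-names = "bus_early", "ref", "suspend";
 */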
1987 static int dwc3_get_clocks(struct dwc3 *dwc)
1988 {
1989 struct device *dev = dwc->dev;
1990
1991 if (!dev->of_node)
1992 return 0;
1993
1994 /*
1995 * Clocks are optional, but new DT platforms should support all clocks
1996 * as required by the DT binding.
1997 * Some devices use different clock names in legacy device trees;
1998 * check for them to retain backwards compatibility.
1999 */
2000 dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
2001 if (IS_ERR(dwc->bus_clk)) {
2002 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
2003 "could not get bus clock\n");
2004 }
2005
2006 if (dwc->bus_clk == NULL) {
2007 dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
2008 if (IS_ERR(dwc->bus_clk)) {
2009 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
2010 "could not get bus clock\n");
2011 }
2012 }
2013
2014 dwc->ref_clk = devm_clk_get_optional(dev, "ref");
2015 if (IS_ERR(dwc->ref_clk)) {
2016 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
2017 "could not get ref clock\n");
2018 }
2019
2020 if (dwc->ref_clk == NULL) {
2021 dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
2022 if (IS_ERR(dwc->ref_clk)) {
2023 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
2024 "could not get ref clock\n");
2025 }
2026 }
2027
2028 dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
2029 if (IS_ERR(dwc->susp_clk)) {
2030 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
2031 "could not get suspend clock\n");
2032 }
2033
2034 if (dwc->susp_clk == NULL) {
2035 dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
2036 if (IS_ERR(dwc->susp_clk)) {
2037 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
2038 "could not get suspend clock\n");
2039 }
2040 }
2041
2042 /* specific to Rockchip RK3588 */
2043 dwc->utmi_clk = devm_clk_get_optional(dev, "utmi");
2044 if (IS_ERR(dwc->utmi_clk)) {
2045 return dev_err_probe(dev, PTR_ERR(dwc->utmi_clk),
2046 "could not get utmi clock\n");
2047 }
2048
2049 /* specific to Rockchip RK3588 */
2050 dwc->pipe_clk = devm_clk_get_optional(dev, "pipe");
2051 if (IS_ERR(dwc->pipe_clk)) {
2052 return dev_err_probe(dev, PTR_ERR(dwc->pipe_clk),
2053 "could not get pipe clock\n");
2054 }
2055
2056 return 0;
2057 }
2058
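/*
 * dwc3_get_num_ports - count the USB2/USB3 root-hub ports of a
 * host-capable core by walking the xHCI "Supported Protocol" extended
 * capabilities and summing the port counts per protocol major revision.
 */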
2059 static int dwc3_get_num_ports(struct dwc3 *dwc)
2060 {
2061 void __iomem *base;
2062 u8 major_revision;
2063 u32 offset;
2064 u32 val;
2065
2066 /*
2067 * Remap the xHCI address space to access the xHCI extended capability
2068 * registers, which are needed to determine the number of ports present.
2069 */
2070 base = ioremap(dwc->xhci_resources[0].start,
2071 resource_size(&dwc->xhci_resources[0]));
2072 if (!base)
2073 return -ENOMEM;
2074
2075 offset = 0;
2076 do {
2077 offset = xhci_find_next_ext_cap(base, offset,
2078 XHCI_EXT_CAPS_PROTOCOL);
2079 if (!offset)
2080 break;
2081
2082 val = readl(base + offset);
2083 major_revision = XHCI_EXT_PORT_MAJOR(val);
2084
2085 val = readl(base + offset + 0x08);
2086 if (major_revision == 0x03) {
2087 dwc->num_usb3_ports += XHCI_EXT_PORT_COUNT(val);
2088 } else if (major_revision <= 0x02) {
2089 dwc->num_usb2_ports += XHCI_EXT_PORT_COUNT(val);
2090 } else {
2091 dev_warn(dwc->dev, "unrecognized port major revision %d\n",
2092 major_revision);
2093 }
2094 } while (1);
2095
2096 dev_dbg(dwc->dev, "hs-ports: %u ss-ports: %u\n",
2097 dwc->num_usb2_ports, dwc->num_usb3_ports);
2098
2099 iounmap(base);
2100
2101 if (dwc->num_usb2_ports > DWC3_USB2_MAX_PORTS ||
2102 dwc->num_usb3_ports > DWC3_USB3_MAX_PORTS)
2103 return -EINVAL;
2104
2105 return 0;
2106 }
2107
2108 static int dwc3_probe(struct platform_device *pdev)
2109 {
2110 struct device *dev = &pdev->dev;
2111 struct resource *res, dwc_res;
2112 unsigned int hw_mode;
2113 void __iomem *regs;
2114 struct dwc3 *dwc;
2115 int ret;
2116
2117 dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
2118 if (!dwc)
2119 return -ENOMEM;
2120
2121 dwc->dev = dev;
2122
2123 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2124 if (!res) {
2125 dev_err(dev, "missing memory resource\n");
2126 return -ENODEV;
2127 }
2128
2129 dwc->xhci_resources[0].start = res->start;
2130 dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
2131 DWC3_XHCI_REGS_END;
2132 dwc->xhci_resources[0].flags = res->flags;
2133 dwc->xhci_resources[0].name = res->name;
2134
2135 /*
2136 * Request the memory region but exclude the xHCI regs,
2137 * since those will be requested by the xhci-plat driver.
2138 */
2139 dwc_res = *res;
2140 dwc_res.start += DWC3_GLOBALS_REGS_START;
2141
2142 if (dev->of_node) {
2143 struct device_node *parent = of_get_parent(dev->of_node);
2144
2145 if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
2146 dwc_res.start -= DWC3_GLOBALS_REGS_START;
2147 dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
2148 }
2149
2150 of_node_put(parent);
2151 }
2152
2153 regs = devm_ioremap_resource(dev, &dwc_res);
2154 if (IS_ERR(regs))
2155 return PTR_ERR(regs);
2156
2157 dwc->regs = regs;
2158 dwc->regs_size = resource_size(&dwc_res);
2159
2160 dwc3_get_properties(dwc);
2161
2162 dwc3_get_software_properties(dwc);
2163
2164 dwc->reset = devm_reset_control_array_get_optional_shared(dev);
2165 if (IS_ERR(dwc->reset)) {
2166 ret = PTR_ERR(dwc->reset);
2167 goto err_put_psy;
2168 }
2169
2170 ret = dwc3_get_clocks(dwc);
2171 if (ret)
2172 goto err_put_psy;
2173
2174 ret = reset_control_deassert(dwc->reset);
2175 if (ret)
2176 goto err_put_psy;
2177
2178 ret = dwc3_clk_enable(dwc);
2179 if (ret)
2180 goto err_assert_reset;
2181
2182 if (!dwc3_core_is_valid(dwc)) {
2183 dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
2184 ret = -ENODEV;
2185 goto err_disable_clks;
2186 }
2187
2188 platform_set_drvdata(pdev, dwc);
2189 dwc3_cache_hwparams(dwc);
2190
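/*
 * When the core device itself masters DMA (sysdev is not a parent glue
 * device), size the DMA mask from the bus address width reported in
 * GHWPARAMS0.
 */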
2191 if (!dwc->sysdev_is_parent &&
2192 DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
2193 ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
2194 if (ret)
2195 goto err_disable_clks;
2196 }
2197
2198 /*
2199 * Currently only DWC3 controllers that are host-only capable
2200 * can have more than one port.
2201 */
2202 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
2203 if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
2204 ret = dwc3_get_num_ports(dwc);
2205 if (ret)
2206 goto err_disable_clks;
2207 } else {
2208 dwc->num_usb2_ports = 1;
2209 dwc->num_usb3_ports = 1;
2210 }
2211
2212 spin_lock_init(&dwc->lock);
2213 mutex_init(&dwc->mutex);
2214
2215 pm_runtime_get_noresume(dev);
2216 pm_runtime_set_active(dev);
2217 pm_runtime_use_autosuspend(dev);
2218 pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
2219 pm_runtime_enable(dev);
2220
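/*
 * Forbid runtime suspend for now; it is re-allowed from the error path
 * below or once userspace opts in via the power/control attribute.
 */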
2221 pm_runtime_forbid(dev);
2222
2223 ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
2224 if (ret) {
2225 dev_err(dwc->dev, "failed to allocate event buffers\n");
2226 ret = -ENOMEM;
2227 goto err_allow_rpm;
2228 }
2229
2230 dwc->edev = dwc3_get_extcon(dwc);
2231 if (IS_ERR(dwc->edev)) {
2232 ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
2233 goto err_free_event_buffers;
2234 }
2235
2236 ret = dwc3_get_dr_mode(dwc);
2237 if (ret)
2238 goto err_free_event_buffers;
2239
2240 ret = dwc3_core_init(dwc);
2241 if (ret) {
2242 dev_err_probe(dev, ret, "failed to initialize core\n");
2243 goto err_free_event_buffers;
2244 }
2245
2246 dwc3_check_params(dwc);
2247 dwc3_debugfs_init(dwc);
2248
2249 ret = dwc3_core_init_mode(dwc);
2250 if (ret)
2251 goto err_exit_debugfs;
2252
2253 pm_runtime_put(dev);
2254
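/*
 * Lift the default DMA max segment size so the mapping layer does not
 * artificially split large buffers; TRB-level limits are handled
 * elsewhere.
 */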
2255 dma_set_max_seg_size(dev, UINT_MAX);
2256
2257 return 0;
2258
2259 err_exit_debugfs:
2260 dwc3_debugfs_exit(dwc);
2261 dwc3_event_buffers_cleanup(dwc);
2262 dwc3_phy_power_off(dwc);
2263 dwc3_phy_exit(dwc);
2264 dwc3_ulpi_exit(dwc);
2265 err_free_event_buffers:
2266 dwc3_free_event_buffers(dwc);
2267 err_allow_rpm:
2268 pm_runtime_allow(dev);
2269 pm_runtime_disable(dev);
2270 pm_runtime_dont_use_autosuspend(dev);
2271 pm_runtime_set_suspended(dev);
2272 pm_runtime_put_noidle(dev);
2273 err_disable_clks:
2274 dwc3_clk_disable(dwc);
2275 err_assert_reset:
2276 reset_control_assert(dwc->reset);
2277 err_put_psy:
2278 if (dwc->usb_psy)
2279 power_supply_put(dwc->usb_psy);
2280
2281 return ret;
2282 }
2283
2284 static void dwc3_remove(struct platform_device *pdev)
2285 {
2286 struct dwc3 *dwc = platform_get_drvdata(pdev);
2287
2288 pm_runtime_get_sync(&pdev->dev);
2289
2290 dwc3_core_exit_mode(dwc);
2291 dwc3_debugfs_exit(dwc);
2292
2293 dwc3_core_exit(dwc);
2294 dwc3_ulpi_exit(dwc);
2295
2296 pm_runtime_allow(&pdev->dev);
2297 pm_runtime_disable(&pdev->dev);
2298 pm_runtime_dont_use_autosuspend(&pdev->dev);
2299 pm_runtime_put_noidle(&pdev->dev);
2300 /*
2301 * HACK: Clear the driver data, which is currently accessed by parent
2302 * glue drivers, before allowing the parent to suspend.
2303 */
2304 platform_set_drvdata(pdev, NULL);
2305 pm_runtime_set_suspended(&pdev->dev);
2306
2307 dwc3_free_event_buffers(dwc);
2308
2309 if (dwc->usb_psy)
2310 power_supply_put(dwc->usb_psy);
2311 }
2312
2313 #ifdef CONFIG_PM
2314 static int dwc3_core_init_for_resume(struct dwc3 *dwc)
2315 {
2316 int ret;
2317
2318 ret = reset_control_deassert(dwc->reset);
2319 if (ret)
2320 return ret;
2321
2322 ret = dwc3_clk_enable(dwc);
2323 if (ret)
2324 goto assert_reset;
2325
2326 ret = dwc3_core_init(dwc);
2327 if (ret)
2328 goto disable_clks;
2329
2330 return 0;
2331
2332 disable_clks:
2333 dwc3_clk_disable(dwc);
2334 assert_reset:
2335 reset_control_assert(dwc->reset);
2336
2337 return ret;
2338 }
2339
2340 static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
2341 {
2342 u32 reg;
2343 int i;
2344
2345 if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) {
2346 dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
2347 DWC3_GUSB2PHYCFG_SUSPHY) ||
2348 (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
2349 DWC3_GUSB3PIPECTL_SUSPHY);
2350 /*
2351 * TI AM62 platform requires SUSPHY to be
2352 * enabled for system suspend to work.
2353 */
2354 if (!dwc->susphy_state)
2355 dwc3_enable_susphy(dwc, true);
2356 }
2357
2358 switch (dwc->current_dr_role) {
2359 case DWC3_GCTL_PRTCAP_DEVICE:
2360 if (pm_runtime_suspended(dwc->dev))
2361 break;
2362 dwc3_gadget_suspend(dwc);
2363 synchronize_irq(dwc->irq_gadget);
2364 dwc3_core_exit(dwc);
2365 break;
2366 case DWC3_GCTL_PRTCAP_HOST:
2367 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2368 dwc3_core_exit(dwc);
2369 break;
2370 }
2371
2372 /* Let the controller suspend the HSPHY before the PHY driver suspends */
2373 if (dwc->dis_u2_susphy_quirk ||
2374 dwc->dis_enblslpm_quirk) {
2375 for (i = 0; i < dwc->num_usb2_ports; i++) {
2376 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
2377 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
2378 DWC3_GUSB2PHYCFG_SUSPHY;
2379 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
2380 }
2381
2382 /* Give some time for USB2 PHY to suspend */
2383 usleep_range(5000, 6000);
2384 }
2385
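/* Drop the PHY runtime PM references; dwc3_resume_common() takes them back */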
2386 for (i = 0; i < dwc->num_usb2_ports; i++)
2387 phy_pm_runtime_put_sync(dwc->usb2_generic_phy[i]);
2388 for (i = 0; i < dwc->num_usb3_ports; i++)
2389 phy_pm_runtime_put_sync(dwc->usb3_generic_phy[i]);
2390 break;
2391 case DWC3_GCTL_PRTCAP_OTG:
2392 /* do nothing during runtime_suspend */
2393 if (PMSG_IS_AUTO(msg))
2394 break;
2395
2396 if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2397 dwc3_gadget_suspend(dwc);
2398 synchronize_irq(dwc->irq_gadget);
2399 }
2400
2401 dwc3_otg_exit(dwc);
2402 dwc3_core_exit(dwc);
2403 break;
2404 default:
2405 /* do nothing */
2406 break;
2407 }
2408
2409 return 0;
2410 }
2411
2412 static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
2413 {
2414 int ret;
2415 u32 reg;
2416 int i;
2417
2418 switch (dwc->current_dr_role) {
2419 case DWC3_GCTL_PRTCAP_DEVICE:
2420 ret = dwc3_core_init_for_resume(dwc);
2421 if (ret)
2422 return ret;
2423
2424 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
2425 dwc3_gadget_resume(dwc);
2426 break;
2427 case DWC3_GCTL_PRTCAP_HOST:
2428 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2429 ret = dwc3_core_init_for_resume(dwc);
2430 if (ret)
2431 return ret;
2432 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
2433 break;
2434 }
2435 /* Restore GUSB2PHYCFG bits that were modified in suspend */
2436 for (i = 0; i < dwc->num_usb2_ports; i++) {
2437 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
2438 if (dwc->dis_u2_susphy_quirk)
2439 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
2440
2441 if (dwc->dis_enblslpm_quirk)
2442 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
2443
2444 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
2445 }
2446
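/* Re-take the PHY runtime PM references dropped in dwc3_suspend_common() */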
2447 for (i = 0; i < dwc->num_usb2_ports; i++)
2448 phy_pm_runtime_get_sync(dwc->usb2_generic_phy[i]);
2449 for (i = 0; i < dwc->num_usb3_ports; i++)
2450 phy_pm_runtime_get_sync(dwc->usb3_generic_phy[i]);
2451 break;
2452 case DWC3_GCTL_PRTCAP_OTG:
2453 /* nothing to do on runtime_resume */
2454 if (PMSG_IS_AUTO(msg))
2455 break;
2456
2457 ret = dwc3_core_init_for_resume(dwc);
2458 if (ret)
2459 return ret;
2460
2461 dwc3_set_prtcap(dwc, dwc->current_dr_role);
2462
2463 dwc3_otg_init(dwc);
2464 if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
2465 dwc3_otg_host_init(dwc);
2466 } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2467 dwc3_gadget_resume(dwc);
2468 }
2469
2470 break;
2471 default:
2472 /* do nothing */
2473 break;
2474 }
2475
2476 if (!PMSG_IS_AUTO(msg)) {
2477 /* Restore the SUSPHY state to what it was before system suspend. */
2478 dwc3_enable_susphy(dwc, dwc->susphy_state);
2479 }
2480
2481 return 0;
2482 }
2483
2484 static int dwc3_runtime_checks(struct dwc3 *dwc)
2485 {
2486 switch (dwc->current_dr_role) {
2487 case DWC3_GCTL_PRTCAP_DEVICE:
2488 if (dwc->connected)
2489 return -EBUSY;
2490 break;
2491 case DWC3_GCTL_PRTCAP_HOST:
2492 default:
2493 /* do nothing */
2494 break;
2495 }
2496
2497 return 0;
2498 }
2499
2500 static int dwc3_runtime_suspend(struct device *dev)
2501 {
2502 struct dwc3 *dwc = dev_get_drvdata(dev);
2503 int ret;
2504
2505 if (dwc3_runtime_checks(dwc))
2506 return -EBUSY;
2507
2508 ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
2509 if (ret)
2510 return ret;
2511
2512 return 0;
2513 }
2514
2515 static int dwc3_runtime_resume(struct device *dev)
2516 {
2517 struct dwc3 *dwc = dev_get_drvdata(dev);
2518 int ret;
2519
2520 ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
2521 if (ret)
2522 return ret;
2523
2524 switch (dwc->current_dr_role) {
2525 case DWC3_GCTL_PRTCAP_DEVICE:
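/*
 * Events that arrived while runtime-suspended were deferred: the gadget
 * IRQ was disabled and an extra PM usage count was taken. Drop that
 * count and re-enable the IRQ so the pending events get handled.
 */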
2526 if (dwc->pending_events) {
2527 pm_runtime_put(dwc->dev);
2528 dwc->pending_events = false;
2529 enable_irq(dwc->irq_gadget);
2530 }
2531 break;
2532 case DWC3_GCTL_PRTCAP_HOST:
2533 default:
2534 /* do nothing */
2535 break;
2536 }
2537
2538 pm_runtime_mark_last_busy(dev);
2539
2540 return 0;
2541 }
2542
2543 static int dwc3_runtime_idle(struct device *dev)
2544 {
2545 struct dwc3 *dwc = dev_get_drvdata(dev);
2546
2547 switch (dwc->current_dr_role) {
2548 case DWC3_GCTL_PRTCAP_DEVICE:
2549 if (dwc3_runtime_checks(dwc))
2550 return -EBUSY;
2551 break;
2552 case DWC3_GCTL_PRTCAP_HOST:
2553 default:
2554 /* do nothing */
2555 break;
2556 }
2557
2558 pm_runtime_mark_last_busy(dev);
2559 pm_runtime_autosuspend(dev);
2560
2561 return 0;
2562 }
2563 #endif /* CONFIG_PM */
2564
2565 #ifdef CONFIG_PM_SLEEP
2566 static int dwc3_suspend(struct device *dev)
2567 {
2568 struct dwc3 *dwc = dev_get_drvdata(dev);
2569 int ret;
2570
2571 ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
2572 if (ret)
2573 return ret;
2574
2575 pinctrl_pm_select_sleep_state(dev);
2576
2577 return 0;
2578 }
2579
2580 static int dwc3_resume(struct device *dev)
2581 {
2582 struct dwc3 *dwc = dev_get_drvdata(dev);
2583 int ret = 0;
2584
2585 pinctrl_pm_select_default_state(dev);
2586
2587 pm_runtime_disable(dev);
2588 pm_runtime_set_active(dev);
2589
2590 ret = dwc3_resume_common(dwc, PMSG_RESUME);
2591 if (ret)
2592 pm_runtime_set_suspended(dev);
2593
2594 pm_runtime_enable(dev);
2595
2596 return ret;
2597 }
2598
2599 static void dwc3_complete(struct device *dev)
2600 {
2601 struct dwc3 *dwc = dev_get_drvdata(dev);
2602 u32 reg;
2603
2604 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
2605 dwc->dis_split_quirk) {
2606 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
2607 reg |= DWC3_GUCTL3_SPLITDISABLE;
2608 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
2609 }
2610 }
2611 #else
2612 #define dwc3_complete NULL
2613 #endif /* CONFIG_PM_SLEEP */
2614
2615 static const struct dev_pm_ops dwc3_dev_pm_ops = {
2616 SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
2617 .complete = dwc3_complete,
2618
2619 /*
2620 * Runtime suspend halts the controller on disconnection. It relies on
2621 * platforms with custom connection notification to start the controller
2622 * again.
2623 */
2624 SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
2625 dwc3_runtime_idle)
2626 };
2627
2628 #ifdef CONFIG_OF
2629 static const struct of_device_id of_dwc3_match[] = {
2630 {
2631 .compatible = "snps,dwc3"
2632 },
2633 {
2634 .compatible = "synopsys,dwc3"
2635 },
2636 { },
2637 };
2638 MODULE_DEVICE_TABLE(of, of_dwc3_match);
2639 #endif
2640
2641 #ifdef CONFIG_ACPI
2642
2643 #define ACPI_ID_INTEL_BSW "808622B7"
2644
2645 static const struct acpi_device_id dwc3_acpi_match[] = {
2646 { ACPI_ID_INTEL_BSW, 0 },
2647 { },
2648 };
2649 MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
2650 #endif
2651
2652 static struct platform_driver dwc3_driver = {
2653 .probe = dwc3_probe,
2654 .remove_new = dwc3_remove,
2655 .driver = {
2656 .name = "dwc3",
2657 .of_match_table = of_match_ptr(of_dwc3_match),
2658 .acpi_match_table = ACPI_PTR(dwc3_acpi_match),
2659 .pm = &dwc3_dev_pm_ops,
2660 },
2661 };
2662
2663 module_platform_driver(dwc3_driver);
2664
2665 MODULE_ALIAS("platform:dwc3");
2666 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
2667 MODULE_LICENSE("GPL v2");
2668 MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
2669