1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * core.c - DesignWare USB3 DRD Controller Core file
4 *
5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
6 *
7 * Authors: Felipe Balbi <balbi@ti.com>,
8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9 */
10
11 #include <linux/clk.h>
12 #include <linux/version.h>
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/interrupt.h>
20 #include <linux/ioport.h>
21 #include <linux/io.h>
22 #include <linux/list.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/of.h>
26 #include <linux/of_graph.h>
27 #include <linux/acpi.h>
28 #include <linux/pinctrl/consumer.h>
29 #include <linux/pinctrl/devinfo.h>
30 #include <linux/reset.h>
31 #include <linux/bitfield.h>
32
33 #include <linux/usb/ch9.h>
34 #include <linux/usb/gadget.h>
35 #include <linux/usb/of.h>
36 #include <linux/usb/otg.h>
37
38 #include "core.h"
39 #include "gadget.h"
40 #include "glue.h"
41 #include "io.h"
42
43 #include "debug.h"
44 #include "../host/xhci-ext-caps.h"
45
46 #define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
47
48 /**
49 * dwc3_get_dr_mode - Validates and sets dr_mode
50 * @dwc: pointer to our context structure
51 */
52 static int dwc3_get_dr_mode(struct dwc3 *dwc)
53 {
54 enum usb_dr_mode mode;
55 struct device *dev = dwc->dev;
56 unsigned int hw_mode;
57
58 if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
59 dwc->dr_mode = USB_DR_MODE_OTG;
60
61 mode = dwc->dr_mode;
62 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
63
64 switch (hw_mode) {
65 case DWC3_GHWPARAMS0_MODE_GADGET:
66 if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
67 dev_err(dev,
68 "Controller does not support host mode.\n");
69 return -EINVAL;
70 }
71 mode = USB_DR_MODE_PERIPHERAL;
72 break;
73 case DWC3_GHWPARAMS0_MODE_HOST:
74 if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
75 dev_err(dev,
76 "Controller does not support device mode.\n");
77 return -EINVAL;
78 }
79 mode = USB_DR_MODE_HOST;
80 break;
81 default:
82 if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
83 mode = USB_DR_MODE_HOST;
84 else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
85 mode = USB_DR_MODE_PERIPHERAL;
86
87 /*
88 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
89 * mode. If the controller supports DRD but the dr_mode is not
90 * specified or set to OTG, then set the mode to peripheral.
91 */
92 if (mode == USB_DR_MODE_OTG && !dwc->edev &&
93 (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
94 !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
95 !DWC3_VER_IS_PRIOR(DWC3, 330A))
96 mode = USB_DR_MODE_PERIPHERAL;
97 }
98
99 if (mode != dwc->dr_mode) {
100 dev_warn(dev,
101 "Configuration mismatch. dr_mode forced to %s\n",
102 mode == USB_DR_MODE_HOST ? "host" : "gadget");
103
104 dwc->dr_mode = mode;
105 }
106
107 return 0;
108 }
109
110 void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
111 {
112 u32 reg;
113 int i;
114
115 for (i = 0; i < dwc->num_usb3_ports; i++) {
116 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(i));
117 if (enable && !dwc->dis_u3_susphy_quirk)
118 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
119 else
120 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
121
122 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(i), reg);
123 }
124
125 for (i = 0; i < dwc->num_usb2_ports; i++) {
126 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
127 if (enable && !dwc->dis_u2_susphy_quirk)
128 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
129 else
130 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
131
132 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
133 }
134 }
135
136 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
137 {
138 unsigned int hw_mode;
139 u32 reg;
140
141 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
142
143 /*
144 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and
145 * GUSB2PHYCFG.SUSPHY should be cleared during mode switching,
146 * and they can be set after core initialization.
147 */
148 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
149 if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) {
150 if (DWC3_GCTL_PRTCAP(reg) != mode)
151 dwc3_enable_susphy(dwc, false);
152 }
153
154 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
155 reg |= DWC3_GCTL_PRTCAPDIR(mode);
156 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
157
158 dwc->current_dr_role = mode;
159 }
160
161 static void __dwc3_set_mode(struct work_struct *work)
162 {
163 struct dwc3 *dwc = work_to_dwc(work);
164 unsigned long flags;
165 int ret;
166 u32 reg;
167 u32 desired_dr_role;
168 int i;
169
170 mutex_lock(&dwc->mutex);
171 spin_lock_irqsave(&dwc->lock, flags);
172 desired_dr_role = dwc->desired_dr_role;
173 spin_unlock_irqrestore(&dwc->lock, flags);
174
175 pm_runtime_get_sync(dwc->dev);
176
177 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
178 dwc3_otg_update(dwc, 0);
179
180 if (!desired_dr_role)
181 goto out;
182
183 if (desired_dr_role == dwc->current_dr_role)
184 goto out;
185
186 if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
187 goto out;
188
189 switch (dwc->current_dr_role) {
190 case DWC3_GCTL_PRTCAP_HOST:
191 dwc3_host_exit(dwc);
192 break;
193 case DWC3_GCTL_PRTCAP_DEVICE:
194 dwc3_gadget_exit(dwc);
195 dwc3_event_buffers_cleanup(dwc);
196 break;
197 case DWC3_GCTL_PRTCAP_OTG:
198 dwc3_otg_exit(dwc);
199 spin_lock_irqsave(&dwc->lock, flags);
200 dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
201 spin_unlock_irqrestore(&dwc->lock, flags);
202 dwc3_otg_update(dwc, 1);
203 break;
204 default:
205 break;
206 }
207
208 /*
209 * When current_dr_role is not set, there's no role switching.
210 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
211 */
212 if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
213 DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
214 desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
215 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
216 reg |= DWC3_GCTL_CORESOFTRESET;
217 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
218
219 /*
220 * Wait for the internal clocks to synchronize. DWC_usb31 and
221 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
222 * keep it consistent across different IPs, let's wait up to
223 * 100ms before clearing GCTL.CORESOFTRESET.
224 */
225 msleep(100);
226
227 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
228 reg &= ~DWC3_GCTL_CORESOFTRESET;
229 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
230 }
231
232 spin_lock_irqsave(&dwc->lock, flags);
233
234 dwc3_set_prtcap(dwc, desired_dr_role, false);
235
236 spin_unlock_irqrestore(&dwc->lock, flags);
237
238 switch (desired_dr_role) {
239 case DWC3_GCTL_PRTCAP_HOST:
240 ret = dwc3_host_init(dwc);
241 if (ret) {
242 dev_err(dwc->dev, "failed to initialize host\n");
243 } else {
244 if (dwc->usb2_phy)
245 otg_set_vbus(dwc->usb2_phy->otg, true);
246
247 for (i = 0; i < dwc->num_usb2_ports; i++)
248 phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
249 for (i = 0; i < dwc->num_usb3_ports; i++)
250 phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);
251
252 if (dwc->dis_split_quirk) {
253 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
254 reg |= DWC3_GUCTL3_SPLITDISABLE;
255 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
256 }
257 }
258 break;
259 case DWC3_GCTL_PRTCAP_DEVICE:
260 dwc3_core_soft_reset(dwc);
261
262 dwc3_event_buffers_setup(dwc);
263
264 if (dwc->usb2_phy)
265 otg_set_vbus(dwc->usb2_phy->otg, false);
266 phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
267 phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);
268
269 ret = dwc3_gadget_init(dwc);
270 if (ret)
271 dev_err(dwc->dev, "failed to initialize peripheral\n");
272 break;
273 case DWC3_GCTL_PRTCAP_OTG:
274 dwc3_otg_init(dwc);
275 dwc3_otg_update(dwc, 0);
276 break;
277 default:
278 break;
279 }
280
281 out:
282 pm_runtime_mark_last_busy(dwc->dev);
283 pm_runtime_put_autosuspend(dwc->dev);
284 mutex_unlock(&dwc->mutex);
285 }
286
287 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
288 {
289 unsigned long flags;
290
291 if (dwc->dr_mode != USB_DR_MODE_OTG)
292 return;
293
294 spin_lock_irqsave(&dwc->lock, flags);
295 dwc->desired_dr_role = mode;
296 spin_unlock_irqrestore(&dwc->lock, flags);
297
298 queue_work(system_freezable_wq, &dwc->drd_work);
299 }
300
301 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
302 {
303 struct dwc3 *dwc = dep->dwc;
304 u32 reg;
305
306 dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
307 DWC3_GDBGFIFOSPACE_NUM(dep->number) |
308 DWC3_GDBGFIFOSPACE_TYPE(type));
309
310 reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);
311
312 return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
313 }
314
315 /**
316 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
317 * @dwc: pointer to our context structure
318 */
319 int dwc3_core_soft_reset(struct dwc3 *dwc)
320 {
321 u32 reg;
322 int retries = 1000;
323
324 /*
325 * We're resetting only the device side because, if we're in host mode,
326 * XHCI driver will reset the host block. If dwc3 was configured for
327 * host-only mode, then we can return early.
328 */
329 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
330 return 0;
331
332 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
333 reg |= DWC3_DCTL_CSFTRST;
334 reg &= ~DWC3_DCTL_RUN_STOP;
335 dwc3_gadget_dctl_write_safe(dwc, reg);
336
337 /*
338 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFTRST bit
339 * is cleared only after all the clocks are synchronized. This can
340 * take a little more than 50ms. Set the polling rate at 20ms
341 * for 10 times instead.
342 */
343 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
344 retries = 10;
345
346 do {
347 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
348 if (!(reg & DWC3_DCTL_CSFTRST))
349 goto done;
350
351 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
352 msleep(20);
353 else
354 udelay(1);
355 } while (--retries);
356
357 dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
358 return -ETIMEDOUT;
359
360 done:
361 /*
362 * For DWC_usb31 controller 1.80a and prior, once the DCTL.CSFTRST bit
363 * is cleared, we must wait at least 50ms before accessing the PHY
364 * domain (synchronization delay).
365 */
366 if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
367 msleep(50);
368
369 return 0;
370 }
371
372 /*
373 * dwc3_frame_length_adjustment - Adjusts frame length if required
374 * @dwc: Pointer to our controller context structure
375 */
376 static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
377 {
378 u32 reg;
379 u32 dft;
380
381 if (DWC3_VER_IS_PRIOR(DWC3, 250A))
382 return;
383
384 if (dwc->fladj == 0)
385 return;
386
387 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
388 dft = reg & DWC3_GFLADJ_30MHZ_MASK;
389 if (dft != dwc->fladj) {
390 reg &= ~DWC3_GFLADJ_30MHZ_MASK;
391 reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
392 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
393 }
394 }
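/*
 * Illustration (hypothetical value, not from the original source): a device
 * property "snps,quirk-frame-length-adjustment = <0x20>" yields dwc->fladj
 * = 0x20, and the function above then programs GFLADJ.GFLADJ_30MHZ with
 * 0x20 together with the GFLADJ_30MHZ_SDBND_SEL bit.
 */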
395
396 /**
397 * dwc3_ref_clk_period - Reference clock period configuration
398 * The default reference clock period depends on the hardware
399 * configuration. For systems with a reference clock that differs
400 * from the default, this will set the clock period in the
401 * DWC3_GUCTL register.
402 * @dwc: Pointer to our controller context structure
403 */
404 static void dwc3_ref_clk_period(struct dwc3 *dwc)
405 {
406 unsigned long period;
407 unsigned long fladj;
408 unsigned long decr;
409 unsigned long rate;
410 u32 reg;
411
412 if (dwc->ref_clk) {
413 rate = clk_get_rate(dwc->ref_clk);
414 if (!rate)
415 return;
416 period = NSEC_PER_SEC / rate;
417 } else if (dwc->ref_clk_per) {
418 period = dwc->ref_clk_per;
419 rate = NSEC_PER_SEC / period;
420 } else {
421 return;
422 }
423
424 reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
425 reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
426 reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
427 dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
428
429 if (DWC3_VER_IS_PRIOR(DWC3, 250A))
430 return;
431
432 /*
433 * The calculation below is
434 *
435 * 125000 * (NSEC_PER_SEC / (rate * period) - 1)
436 *
437 * but rearranged for fixed-point arithmetic. The division must be
438 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
439 * neither does rate * period).
440 *
441 * Note that rate * period ~= NSEC_PER_SEC, minus the number of
442 * nanoseconds of error caused by the truncation which happened during
443 * the division when calculating rate or period (whichever one was
444 * derived from the other). We first calculate the relative error, then
445 * scale it to units of 8 ppm.
446 */
447 fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
448 fladj -= 125000;
449
450 /*
451 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
452 */
453 decr = 480000000 / rate;
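/*
 * Worked example (hypothetical 24 MHz reference clock, not taken from the
 * source): period = 1000000000 / 24000000 is truncated to 41 ns, so
 * rate * period = 984000000. Then 125000 * NSEC_PER_SEC / 984000000 =
 * 127032, giving fladj = 127032 - 125000 = 2032 (about 1.6% expressed in
 * units of 8 ppm). decr = 480000000 / 24000000 = 20, so 240MHZDECR is
 * programmed with 10 and 240MHZDECR_PLS1 with 0.
 */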
454
455 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
456 reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
457 & ~DWC3_GFLADJ_240MHZDECR
458 & ~DWC3_GFLADJ_240MHZDECR_PLS1;
459 reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
460 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
461 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);
462
463 if (dwc->gfladj_refclk_lpm_sel)
464 reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
465
466 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
467 }
468
469 /**
470 * dwc3_free_one_event_buffer - Frees one event buffer
471 * @dwc: Pointer to our controller context structure
472 * @evt: Pointer to event buffer to be freed
473 */
474 static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
475 struct dwc3_event_buffer *evt)
476 {
477 dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
478 }
479
480 /**
481 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
482 * @dwc: Pointer to our controller context structure
483 * @length: size of the event buffer
484 *
485 * Returns a pointer to the allocated event buffer structure on success
486 * otherwise ERR_PTR(errno).
487 */
488 static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
489 unsigned int length)
490 {
491 struct dwc3_event_buffer *evt;
492
493 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
494 if (!evt)
495 return ERR_PTR(-ENOMEM);
496
497 evt->dwc = dwc;
498 evt->length = length;
499 evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
500 if (!evt->cache)
501 return ERR_PTR(-ENOMEM);
502
503 evt->buf = dma_alloc_coherent(dwc->sysdev, length,
504 &evt->dma, GFP_KERNEL);
505 if (!evt->buf)
506 return ERR_PTR(-ENOMEM);
507
508 return evt;
509 }
510
511 /**
512 * dwc3_free_event_buffers - frees all allocated event buffers
513 * @dwc: Pointer to our controller context structure
514 */
515 static void dwc3_free_event_buffers(struct dwc3 *dwc)
516 {
517 struct dwc3_event_buffer *evt;
518
519 evt = dwc->ev_buf;
520 if (evt)
521 dwc3_free_one_event_buffer(dwc, evt);
522 }
523
524 /**
525 * dwc3_alloc_event_buffers - Allocates the event buffer of size @length
526 * @dwc: pointer to our controller context structure
527 * @length: size of event buffer
528 *
529 * Returns 0 on success otherwise negative errno.
531 */
532 static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
533 {
534 struct dwc3_event_buffer *evt;
535 unsigned int hw_mode;
536
537 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
538 if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
539 dwc->ev_buf = NULL;
540 return 0;
541 }
542
543 evt = dwc3_alloc_one_event_buffer(dwc, length);
544 if (IS_ERR(evt)) {
545 dev_err(dwc->dev, "can't allocate event buffer\n");
546 return PTR_ERR(evt);
547 }
548 dwc->ev_buf = evt;
549
550 return 0;
551 }
552
553 /**
554 * dwc3_event_buffers_setup - setup our allocated event buffers
555 * @dwc: pointer to our controller context structure
556 *
557 * Returns 0 on success otherwise negative errno.
558 */
559 int dwc3_event_buffers_setup(struct dwc3 *dwc)
560 {
561 struct dwc3_event_buffer *evt;
562 u32 reg;
563
564 if (!dwc->ev_buf)
565 return 0;
566
567 evt = dwc->ev_buf;
568 evt->lpos = 0;
569 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
570 lower_32_bits(evt->dma));
571 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
572 upper_32_bits(evt->dma));
573 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
574 DWC3_GEVNTSIZ_SIZE(evt->length));
575
576 /* Clear any stale event */
577 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
578 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
579 return 0;
580 }
581
582 void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
583 {
584 struct dwc3_event_buffer *evt;
585 u32 reg;
586
587 if (!dwc->ev_buf)
588 return;
589 /*
590 * Exynos platforms may not be able to access the event buffer if the
591 * controller failed to halt on dwc3_core_exit().
592 */
593 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
594 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
595 return;
596
597 evt = dwc->ev_buf;
598
599 evt->lpos = 0;
600
601 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
602 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
603 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
604 | DWC3_GEVNTSIZ_SIZE(0));
605
606 /* Clear any stale event */
607 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
608 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
609 }
610
611 static void dwc3_core_num_eps(struct dwc3 *dwc)
612 {
613 struct dwc3_hwparams *parms = &dwc->hwparams;
614
615 dwc->num_eps = DWC3_NUM_EPS(parms);
616 }
617
618 static void dwc3_cache_hwparams(struct dwc3 *dwc)
619 {
620 struct dwc3_hwparams *parms = &dwc->hwparams;
621
622 parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
623 parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
624 parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
625 parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
626 parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
627 parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
628 parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
629 parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
630 parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
631
632 if (DWC3_IP_IS(DWC32))
633 parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
634 }
635
636 static void dwc3_config_soc_bus(struct dwc3 *dwc)
637 {
638 if (dwc->gsbuscfg0_reqinfo != DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED) {
639 u32 reg;
640
641 reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
642 reg &= ~DWC3_GSBUSCFG0_REQINFO(~0);
643 reg |= DWC3_GSBUSCFG0_REQINFO(dwc->gsbuscfg0_reqinfo);
644 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
645 }
646 }
647
648 static int dwc3_core_ulpi_init(struct dwc3 *dwc)
649 {
650 int intf;
651 int ret = 0;
652
653 intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
654
655 if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
656 (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
657 dwc->hsphy_interface &&
658 !strncmp(dwc->hsphy_interface, "ulpi", 4)))
659 ret = dwc3_ulpi_init(dwc);
660
661 return ret;
662 }
663
664 static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
665 {
666 u32 reg;
667
668 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(index));
669
670 /*
671 * Make sure UX_EXIT_PX is cleared as that causes issues with some
672 * PHYs. Also, this bit is not supposed to be used in normal operation.
673 */
674 reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
675
676 /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */
677 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
678
679 if (dwc->u2ss_inp3_quirk)
680 reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
681
682 if (dwc->dis_rxdet_inp3_quirk)
683 reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;
684
685 if (dwc->req_p1p2p3_quirk)
686 reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;
687
688 if (dwc->del_p1p2p3_quirk)
689 reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;
690
691 if (dwc->del_phy_power_chg_quirk)
692 reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;
693
694 if (dwc->lfps_filter_quirk)
695 reg |= DWC3_GUSB3PIPECTL_LFPSFILT;
696
697 if (dwc->rx_detect_poll_quirk)
698 reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;
699
700 if (dwc->tx_de_emphasis_quirk)
701 reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
702
703 if (dwc->dis_del_phy_power_chg_quirk)
704 reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
705
706 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(index), reg);
707
708 return 0;
709 }
710
711 static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
712 {
713 u32 reg;
714
715 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(index));
716
717 /* Select the HS PHY interface */
718 switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
719 case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
720 if (dwc->hsphy_interface &&
721 !strncmp(dwc->hsphy_interface, "utmi", 4)) {
722 reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
723 break;
724 } else if (dwc->hsphy_interface &&
725 !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
726 reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
727 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);
728 } else {
729 /* Relying on default value. */
730 if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
731 break;
732 }
733 fallthrough;
734 case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
735 default:
736 break;
737 }
738
739 switch (dwc->hsphy_mode) {
740 case USBPHY_INTERFACE_MODE_UTMI:
741 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
742 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
743 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
744 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
745 break;
746 case USBPHY_INTERFACE_MODE_UTMIW:
747 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
748 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
749 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
750 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
751 break;
752 default:
753 break;
754 }
755
756 /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
757 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
758
759 if (dwc->dis_enblslpm_quirk)
760 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
761 else
762 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
763
764 if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
765 reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;
766
767 /*
768 * Some ULPI USB PHYs do not support an internal VBUS supply; driving
769 * the CPEN pin requires configuring the ULPI DRVVBUSEXTERNAL bit of
770 * the OTG_CTRL register. The controller sets the ULPIEXTVBUSDRV bit
771 * (bit 17) of the GUSB2PHYCFG register so that VBUS is driven by an
772 * external supply.
773 */
774 if (dwc->ulpi_ext_vbus_drv)
775 reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;
776
777 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(index), reg);
778
779 return 0;
780 }
781
782 /**
783 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
784 * @dwc: Pointer to our controller context structure
785 *
786 * Returns 0 on success. The USB PHY interfaces are configured but not
787 * initialized. The PHY interfaces and the PHYs get initialized together with
788 * the core in dwc3_core_init.
789 */
790 static int dwc3_phy_setup(struct dwc3 *dwc)
791 {
792 int i;
793 int ret;
794
795 for (i = 0; i < dwc->num_usb3_ports; i++) {
796 ret = dwc3_ss_phy_setup(dwc, i);
797 if (ret)
798 return ret;
799 }
800
801 for (i = 0; i < dwc->num_usb2_ports; i++) {
802 ret = dwc3_hs_phy_setup(dwc, i);
803 if (ret)
804 return ret;
805 }
806
807 return 0;
808 }
809
810 static int dwc3_phy_init(struct dwc3 *dwc)
811 {
812 int ret;
813 int i;
814 int j;
815
816 usb_phy_init(dwc->usb2_phy);
817 usb_phy_init(dwc->usb3_phy);
818
819 for (i = 0; i < dwc->num_usb2_ports; i++) {
820 ret = phy_init(dwc->usb2_generic_phy[i]);
821 if (ret < 0)
822 goto err_exit_usb2_phy;
823 }
824
825 for (j = 0; j < dwc->num_usb3_ports; j++) {
826 ret = phy_init(dwc->usb3_generic_phy[j]);
827 if (ret < 0)
828 goto err_exit_usb3_phy;
829 }
830
831 /*
832 * Above DWC_usb3.0 1.94a, it is recommended to set
833 * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
834 * coreConsultant configuration, so the default value will be '0' when
835 * the core is reset. The application needs to set it to '1' after the
836 * core initialization is completed.
837 *
838 * Certain phy requires to be in P0 power state during initialization.
839 * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear
840 * prior to phy init to maintain in the P0 state.
841 *
842 * After phy initialization, some phy operations can only be executed
843 * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
844 * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
845 * blocking phy ops.
846 */
847 if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
848 dwc3_enable_susphy(dwc, true);
849
850 return 0;
851
852 err_exit_usb3_phy:
853 while (--j >= 0)
854 phy_exit(dwc->usb3_generic_phy[j]);
855
856 err_exit_usb2_phy:
857 while (--i >= 0)
858 phy_exit(dwc->usb2_generic_phy[i]);
859
860 usb_phy_shutdown(dwc->usb3_phy);
861 usb_phy_shutdown(dwc->usb2_phy);
862
863 return ret;
864 }
865
866 static void dwc3_phy_exit(struct dwc3 *dwc)
867 {
868 int i;
869
870 for (i = 0; i < dwc->num_usb3_ports; i++)
871 phy_exit(dwc->usb3_generic_phy[i]);
872
873 for (i = 0; i < dwc->num_usb2_ports; i++)
874 phy_exit(dwc->usb2_generic_phy[i]);
875
876 usb_phy_shutdown(dwc->usb3_phy);
877 usb_phy_shutdown(dwc->usb2_phy);
878 }
879
880 static int dwc3_phy_power_on(struct dwc3 *dwc)
881 {
882 int ret;
883 int i;
884 int j;
885
886 usb_phy_set_suspend(dwc->usb2_phy, 0);
887 usb_phy_set_suspend(dwc->usb3_phy, 0);
888
889 for (i = 0; i < dwc->num_usb2_ports; i++) {
890 ret = phy_power_on(dwc->usb2_generic_phy[i]);
891 if (ret < 0)
892 goto err_power_off_usb2_phy;
893 }
894
895 for (j = 0; j < dwc->num_usb3_ports; j++) {
896 ret = phy_power_on(dwc->usb3_generic_phy[j]);
897 if (ret < 0)
898 goto err_power_off_usb3_phy;
899 }
900
901 return 0;
902
903 err_power_off_usb3_phy:
904 while (--j >= 0)
905 phy_power_off(dwc->usb3_generic_phy[j]);
906
907 err_power_off_usb2_phy:
908 while (--i >= 0)
909 phy_power_off(dwc->usb2_generic_phy[i]);
910
911 usb_phy_set_suspend(dwc->usb3_phy, 1);
912 usb_phy_set_suspend(dwc->usb2_phy, 1);
913
914 return ret;
915 }
916
917 static void dwc3_phy_power_off(struct dwc3 *dwc)
918 {
919 int i;
920
921 for (i = 0; i < dwc->num_usb3_ports; i++)
922 phy_power_off(dwc->usb3_generic_phy[i]);
923
924 for (i = 0; i < dwc->num_usb2_ports; i++)
925 phy_power_off(dwc->usb2_generic_phy[i]);
926
927 usb_phy_set_suspend(dwc->usb3_phy, 1);
928 usb_phy_set_suspend(dwc->usb2_phy, 1);
929 }
930
931 static int dwc3_clk_enable(struct dwc3 *dwc)
932 {
933 int ret;
934
935 ret = clk_prepare_enable(dwc->bus_clk);
936 if (ret)
937 return ret;
938
939 ret = clk_prepare_enable(dwc->ref_clk);
940 if (ret)
941 goto disable_bus_clk;
942
943 ret = clk_prepare_enable(dwc->susp_clk);
944 if (ret)
945 goto disable_ref_clk;
946
947 ret = clk_prepare_enable(dwc->utmi_clk);
948 if (ret)
949 goto disable_susp_clk;
950
951 ret = clk_prepare_enable(dwc->pipe_clk);
952 if (ret)
953 goto disable_utmi_clk;
954
955 return 0;
956
957 disable_utmi_clk:
958 clk_disable_unprepare(dwc->utmi_clk);
959 disable_susp_clk:
960 clk_disable_unprepare(dwc->susp_clk);
961 disable_ref_clk:
962 clk_disable_unprepare(dwc->ref_clk);
963 disable_bus_clk:
964 clk_disable_unprepare(dwc->bus_clk);
965 return ret;
966 }
967
968 static void dwc3_clk_disable(struct dwc3 *dwc)
969 {
970 clk_disable_unprepare(dwc->pipe_clk);
971 clk_disable_unprepare(dwc->utmi_clk);
972 clk_disable_unprepare(dwc->susp_clk);
973 clk_disable_unprepare(dwc->ref_clk);
974 clk_disable_unprepare(dwc->bus_clk);
975 }
976
977 static void dwc3_core_exit(struct dwc3 *dwc)
978 {
979 dwc3_event_buffers_cleanup(dwc);
980 dwc3_phy_power_off(dwc);
981 dwc3_phy_exit(dwc);
982 dwc3_clk_disable(dwc);
983 reset_control_assert(dwc->reset);
984 }
985
986 static bool dwc3_core_is_valid(struct dwc3 *dwc)
987 {
988 u32 reg;
989
990 reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
991 dwc->ip = DWC3_GSNPS_ID(reg);
992
993 /* This should read as U3 followed by revision number */
994 if (DWC3_IP_IS(DWC3)) {
995 dwc->revision = reg;
996 } else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
997 dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
998 dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
999 } else {
1000 return false;
1001 }
1002
1003 return true;
1004 }
1005
1006 static void dwc3_core_setup_global_control(struct dwc3 *dwc)
1007 {
1008 unsigned int power_opt;
1009 unsigned int hw_mode;
1010 u32 reg;
1011
1012 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1013 reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
1014 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
1015 power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
1016
1017 switch (power_opt) {
1018 case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
1019 /**
1020 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
1021 * issue which would cause xHCI compliance tests to fail.
1022 *
1023 * Because of that we cannot enable clock gating on such
1024 * configurations.
1025 *
1026 * Refers to:
1027 *
1028 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
1029 * SOF/ITP Mode Used
1030 */
1031 if ((dwc->dr_mode == USB_DR_MODE_HOST ||
1032 dwc->dr_mode == USB_DR_MODE_OTG) &&
1033 DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
1034 reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
1035 else
1036 reg &= ~DWC3_GCTL_DSBLCLKGTNG;
1037 break;
1038 case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
1039 /*
1040 * REVISIT Enabling this bit so that host-mode hibernation
1041 * will work. Device-mode hibernation is not yet implemented.
1042 */
1043 reg |= DWC3_GCTL_GBLHIBERNATIONEN;
1044 break;
1045 default:
1046 /* nothing */
1047 break;
1048 }
1049
1050 /*
1051 * This is a workaround for STAR#4846132, which only affects
1052 * DWC_usb31 version 2.00a operating in host mode.
1053 *
1054 * There is a problem in DWC_usb31 version 2.00a operating
1055 * in host mode that would cause a CSR read timeout when a CSR
1056 * read coincides with RAM clock gating entry. Disable clock
1057 * gating here, sacrificing power consumption for normal
1058 * operation.
1059 */
1060 if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
1061 hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
1062 reg |= DWC3_GCTL_DSBLCLKGTNG;
1063
1064 /* check if current dwc3 is on simulation board */
1065 if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
1066 dev_info(dwc->dev, "Running with FPGA optimizations\n");
1067 dwc->is_fpga = true;
1068 }
1069
1070 WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
1071 "disable_scramble cannot be used on non-FPGA builds\n");
1072
1073 if (dwc->disable_scramble_quirk && dwc->is_fpga)
1074 reg |= DWC3_GCTL_DISSCRAMBLE;
1075 else
1076 reg &= ~DWC3_GCTL_DISSCRAMBLE;
1077
1078 if (dwc->u2exit_lfps_quirk)
1079 reg |= DWC3_GCTL_U2EXIT_LFPS;
1080
1081 /*
1082 * WORKAROUND: DWC3 revisions <1.90a have a bug
1083 * where the device can fail to connect at SuperSpeed
1084 * and fall back to high-speed mode, which causes
1085 * the device to enter a Connect/Disconnect loop.
1086 */
1087 if (DWC3_VER_IS_PRIOR(DWC3, 190A))
1088 reg |= DWC3_GCTL_U2RSTECN;
1089
1090 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1091 }
1092
1093 static int dwc3_core_get_phy(struct dwc3 *dwc);
1094 static int dwc3_core_ulpi_init(struct dwc3 *dwc);
1095
1096 /* set global incr burst type configuration registers */
1097 static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
1098 {
1099 struct device *dev = dwc->dev;
1100 /* incrx_mode : for INCR burst type. */
1101 bool incrx_mode;
1102 /* incrx_size : for size of INCRX burst. */
1103 u32 incrx_size;
1104 u32 *vals;
1105 u32 cfg;
1106 int ntype;
1107 int ret;
1108 int i;
1109
1110 cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
1111
1112 /*
1113 * Handle property "snps,incr-burst-type-adjustment".
1114 * Get the number of values from this property:
1115 * result <= 0 means this property is not supported.
1116 * result = 1 means INCRx burst mode is supported.
1117 * result > 1 means undefined length burst mode is supported.
1118 */
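/*
 * Illustrative values (hypothetical, not from the original source):
 * "snps,incr-burst-type-adjustment = <1>" keeps fixed-length INCR1 bursts
 * only, while "snps,incr-burst-type-adjustment = <1 4 8 16>" enables
 * undefined length INCR bursts with INCR16 as the largest allowed size.
 */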
1119 ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
1120 if (ntype <= 0)
1121 return;
1122
1123 vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
1124 if (!vals)
1125 return;
1126
1127 /* Get INCR burst type, and parse it */
1128 ret = device_property_read_u32_array(dev,
1129 "snps,incr-burst-type-adjustment", vals, ntype);
1130 if (ret) {
1131 kfree(vals);
1132 dev_err(dev, "failed to get property\n");
1133 return;
1134 }
1135
1136 incrx_size = *vals;
1137
1138 if (ntype > 1) {
1139 /* INCRX (undefined length) burst mode */
1140 incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
1141 for (i = 1; i < ntype; i++) {
1142 if (vals[i] > incrx_size)
1143 incrx_size = vals[i];
1144 }
1145 } else {
1146 /* INCRX burst mode */
1147 incrx_mode = INCRX_BURST_MODE;
1148 }
1149
1150 kfree(vals);
1151
1152 /* Enable Undefined Length INCR Burst and Enable INCRx Burst */
1153 cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
1154 if (incrx_mode)
1155 cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
1156 switch (incrx_size) {
1157 case 256:
1158 cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
1159 break;
1160 case 128:
1161 cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
1162 break;
1163 case 64:
1164 cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
1165 break;
1166 case 32:
1167 cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
1168 break;
1169 case 16:
1170 cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
1171 break;
1172 case 8:
1173 cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
1174 break;
1175 case 4:
1176 cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
1177 break;
1178 case 1:
1179 break;
1180 default:
1181 dev_err(dev, "Invalid property\n");
1182 break;
1183 }
1184
1185 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
1186 }
1187
1188 static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
1189 {
1190 u32 scale;
1191 u32 reg;
1192
1193 if (!dwc->susp_clk)
1194 return;
1195
1196 /*
1197 * The power down scale field specifies how many suspend_clk
1198 * periods fit into a 16KHz clock period. When performing
1199 * the division, round up the remainder.
1200 *
1201 * The power down scale value is calculated using the fastest
1202 * frequency of the suspend_clk. If it isn't fixed (but within
1203 * the accuracy requirement), the driver may not know the max
1204 * rate of the suspend_clk, so only update the power down scale
1205 * if the default is less than the calculated value from
1206 * clk_get_rate() or if the default is questionably high
1207 * (3x or more) to be within the requirement.
1208 */
1209 scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
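/*
 * For example, a hypothetical 100 kHz suspend_clk would give
 * scale = DIV_ROUND_UP(100000, 16000) = 7.
 */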
1210 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1211 if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
1212 (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
1213 reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
1214 reg |= DWC3_GCTL_PWRDNSCALE(scale);
1215 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1216 }
1217 }
1218
1219 static void dwc3_config_threshold(struct dwc3 *dwc)
1220 {
1221 u32 reg;
1222 u8 rx_thr_num;
1223 u8 rx_maxburst;
1224 u8 tx_thr_num;
1225 u8 tx_maxburst;
1226
1227 /*
1228 * Both the number of packets and the max burst settings must be
1229 * configured to enable the RX and/or TX threshold.
1230 */
1231 if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
1232 rx_thr_num = dwc->rx_thr_num_pkt_prd;
1233 rx_maxburst = dwc->rx_max_burst_prd;
1234 tx_thr_num = dwc->tx_thr_num_pkt_prd;
1235 tx_maxburst = dwc->tx_max_burst_prd;
1236
1237 if (rx_thr_num && rx_maxburst) {
1238 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1239 reg |= DWC31_RXTHRNUMPKTSEL_PRD;
1240
1241 reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
1242 reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
1243
1244 reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
1245 reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
1246
1247 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1248 }
1249
1250 if (tx_thr_num && tx_maxburst) {
1251 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1252 reg |= DWC31_TXTHRNUMPKTSEL_PRD;
1253
1254 reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
1255 reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
1256
1257 reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
1258 reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
1259
1260 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1261 }
1262 }
1263
1264 rx_thr_num = dwc->rx_thr_num_pkt;
1265 rx_maxburst = dwc->rx_max_burst;
1266 tx_thr_num = dwc->tx_thr_num_pkt;
1267 tx_maxburst = dwc->tx_max_burst;
1268
1269 if (DWC3_IP_IS(DWC3)) {
1270 if (rx_thr_num && rx_maxburst) {
1271 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1272 reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
1273
1274 reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
1275 reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
1276
1277 reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
1278 reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
1279
1280 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1281 }
1282
1283 if (tx_thr_num && tx_maxburst) {
1284 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1285 reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
1286
1287 reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
1288 reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
1289
1290 reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
1291 reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
1292
1293 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1294 }
1295 } else {
1296 if (rx_thr_num && rx_maxburst) {
1297 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1298 reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
1299
1300 reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
1301 reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
1302
1303 reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
1304 reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
1305
1306 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1307 }
1308
1309 if (tx_thr_num && tx_maxburst) {
1310 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1311 reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
1312
1313 reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
1314 reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
1315
1316 reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
1317 reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
1318
1319 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1320 }
1321 }
1322 }
1323
1324 /**
1325 * dwc3_core_init - Low-level initialization of DWC3 Core
1326 * @dwc: Pointer to our controller context structure
1327 *
1328 * Returns 0 on success otherwise negative errno.
1329 */
1330 static int dwc3_core_init(struct dwc3 *dwc)
1331 {
1332 unsigned int hw_mode;
1333 u32 reg;
1334 int ret;
1335
1336 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
1337
1338 /*
1339 * Write the Linux version code to our GUID register so it's easy to
1340 * figure out which kernel version a bug was found on.
1341 */
1342 dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
1343
1344 ret = dwc3_phy_setup(dwc);
1345 if (ret)
1346 return ret;
1347
1348 if (!dwc->ulpi_ready) {
1349 ret = dwc3_core_ulpi_init(dwc);
1350 if (ret) {
1351 if (ret == -ETIMEDOUT) {
1352 dwc3_core_soft_reset(dwc);
1353 ret = -EPROBE_DEFER;
1354 }
1355 return ret;
1356 }
1357 dwc->ulpi_ready = true;
1358 }
1359
1360 if (!dwc->phys_ready) {
1361 ret = dwc3_core_get_phy(dwc);
1362 if (ret)
1363 goto err_exit_ulpi;
1364 dwc->phys_ready = true;
1365 }
1366
1367 ret = dwc3_phy_init(dwc);
1368 if (ret)
1369 goto err_exit_ulpi;
1370
1371 ret = dwc3_core_soft_reset(dwc);
1372 if (ret)
1373 goto err_exit_phy;
1374
1375 dwc3_core_setup_global_control(dwc);
1376 dwc3_core_num_eps(dwc);
1377
1378 /* Set power down scale of suspend_clk */
1379 dwc3_set_power_down_clk_scale(dwc);
1380
1381 /* Adjust Frame Length */
1382 dwc3_frame_length_adjustment(dwc);
1383
1384 /* Adjust Reference Clock Period */
1385 dwc3_ref_clk_period(dwc);
1386
1387 dwc3_set_incr_burst_type(dwc);
1388
1389 dwc3_config_soc_bus(dwc);
1390
1391 ret = dwc3_phy_power_on(dwc);
1392 if (ret)
1393 goto err_exit_phy;
1394
1395 ret = dwc3_event_buffers_setup(dwc);
1396 if (ret) {
1397 dev_err(dwc->dev, "failed to setup event buffers\n");
1398 goto err_power_off_phy;
1399 }
1400
1401 /*
1402 * ENDXFER polling is available on version 3.10a and later of
1403 * the DWC_usb3 controller. It is NOT available in the
1404 * DWC_usb31 controller.
1405 */
1406 if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
1407 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
1408 reg |= DWC3_GUCTL2_RST_ACTBITLATER;
1409 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
1410 }
1411
1412 /*
1413 * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
1414 * only. If the PM TIMER ECN is enabled through GUCTL2[19], the
1415 * link compliance test (TD7.21) may fail. If the ECN is not
1416 * enabled (GUCTL2[19] = 0), the controller will use the old timer
1417 * value (5us), which is still acceptable for the link compliance
1418 * test. Therefore, do not enable PM TIMER ECM in 3.20a by
1419 * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0.
1420 */
1421 if (DWC3_VER_IS(DWC3, 320A)) {
1422 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
1423 reg &= ~DWC3_GUCTL2_LC_TIMER;
1424 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
1425 }
1426
1427 /*
1428 * When configured in host mode, after issuing a U3/L2 exit the
1429 * controller fails to send a proper CRC checksum in the CRC5 field.
1430 * Because of this behaviour a Transaction Error is generated,
1431 * resulting in a reset and re-enumeration of the attached USB device.
1432 * The termsel, xcvrsel and opmode signals all become 0 at the end of
1433 * resume. Enabling bit 10 of GUCTL1 corrects this problem. This
1434 * option is provided to support certain legacy ULPI PHYs.
1435 */
1436 if (dwc->resume_hs_terminations) {
1437 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1438 reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
1439 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1440 }
1441
1442 if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
1443 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1444
1445 /*
1446 * Enable hardware control of sending remote wakeup
1447 * in HS when the device is in the L1 state.
1448 */
1449 if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
1450 reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
1451
1452 /*
1453 * Decouple USB 2.0 L1 & L2 events, which allows the
1454 * gadget driver to only receive U3/L2 suspend & wakeup
1455 * events and prevent the more frequent L1 LPM transitions
1456 * from interrupting the driver.
1457 */
1458 if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
1459 reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;
1460
1461 if (dwc->dis_tx_ipgap_linecheck_quirk)
1462 reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
1463
1464 if (dwc->parkmode_disable_ss_quirk)
1465 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
1466
1467 if (dwc->parkmode_disable_hs_quirk)
1468 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;
1469
1470 if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY)) {
1471 if (dwc->maximum_speed == USB_SPEED_FULL ||
1472 dwc->maximum_speed == USB_SPEED_HIGH)
1473 reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
1474 else
1475 reg &= ~DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
1476 }
1477
1478 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1479 }
1480
1481 dwc3_config_threshold(dwc);
1482
1483 /*
1484 * Modify this for all supported Super Speed ports when
1485 * multiport support is added.
1486 */
1487 if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
1488 (DWC3_IP_IS(DWC31)) &&
1489 dwc->maximum_speed == USB_SPEED_SUPER) {
1490 int i;
1491
1492 for (i = 0; i < dwc->num_usb3_ports; i++) {
1493 reg = dwc3_readl(dwc->regs, DWC3_LLUCTL(i));
1494 reg |= DWC3_LLUCTL_FORCE_GEN1;
1495 dwc3_writel(dwc->regs, DWC3_LLUCTL(i), reg);
1496 }
1497 }
1498
1499 /*
1500 * STAR 9001346572: This issue affects DWC_usb31 versions 1.80a and
1501 * prior. When an active endpoint not currently cached in the host
1502 * controller is chosen to be cached to the same index as an endpoint
1503 * receiving NAKs, the endpoint receiving NAKs enters continuous
1504 * retry mode. This prevents it from being evicted from the host
1505 * controller cache, blocking the new endpoint from being cached and
1506 * serviced.
1507 *
1508 * To resolve this, for controller versions 1.70a and 1.80a, set the
1509 * GUCTL3 bit[16] (USB2.0 Internal Retry Disable) to 1. This bit
1510 * disables the USB2.0 internal retry feature. The GUCTL3[16] register
1511 * function is available only from version 1.70a.
1512 */
1513 if (DWC3_VER_IS_WITHIN(DWC31, 170A, 180A)) {
1514 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
1515 reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE;
1516 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
1517 }
1518
1519 return 0;
1520
1521 err_power_off_phy:
1522 dwc3_phy_power_off(dwc);
1523 err_exit_phy:
1524 dwc3_phy_exit(dwc);
1525 err_exit_ulpi:
1526 dwc3_ulpi_exit(dwc);
1527
1528 return ret;
1529 }
1530
1531 static int dwc3_core_get_phy(struct dwc3 *dwc)
1532 {
1533 struct device *dev = dwc->dev;
1534 struct device_node *node = dev->of_node;
1535 char phy_name[9];
1536 int ret;
1537 u8 i;
1538
1539 if (node) {
1540 dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
1541 dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
1542 } else {
1543 dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
1544 dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
1545 }
1546
1547 if (IS_ERR(dwc->usb2_phy)) {
1548 ret = PTR_ERR(dwc->usb2_phy);
1549 if (ret == -ENXIO || ret == -ENODEV)
1550 dwc->usb2_phy = NULL;
1551 else
1552 return dev_err_probe(dev, ret, "no usb2 phy configured\n");
1553 }
1554
1555 if (IS_ERR(dwc->usb3_phy)) {
1556 ret = PTR_ERR(dwc->usb3_phy);
1557 if (ret == -ENXIO || ret == -ENODEV)
1558 dwc->usb3_phy = NULL;
1559 else
1560 return dev_err_probe(dev, ret, "no usb3 phy configured\n");
1561 }
1562
1563 for (i = 0; i < dwc->num_usb2_ports; i++) {
1564 if (dwc->num_usb2_ports == 1)
1565 snprintf(phy_name, sizeof(phy_name), "usb2-phy");
1566 else
1567 snprintf(phy_name, sizeof(phy_name), "usb2-%u", i);
1568
1569 dwc->usb2_generic_phy[i] = devm_phy_get(dev, phy_name);
1570 if (IS_ERR(dwc->usb2_generic_phy[i])) {
1571 ret = PTR_ERR(dwc->usb2_generic_phy[i]);
1572 if (ret == -ENOSYS || ret == -ENODEV)
1573 dwc->usb2_generic_phy[i] = NULL;
1574 else
1575 return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
1576 phy_name);
1577 }
1578 }
1579
1580 for (i = 0; i < dwc->num_usb3_ports; i++) {
1581 if (dwc->num_usb3_ports == 1)
1582 snprintf(phy_name, sizeof(phy_name), "usb3-phy");
1583 else
1584 snprintf(phy_name, sizeof(phy_name), "usb3-%u", i);
1585
1586 dwc->usb3_generic_phy[i] = devm_phy_get(dev, phy_name);
1587 if (IS_ERR(dwc->usb3_generic_phy[i])) {
1588 ret = PTR_ERR(dwc->usb3_generic_phy[i]);
1589 if (ret == -ENOSYS || ret == -ENODEV)
1590 dwc->usb3_generic_phy[i] = NULL;
1591 else
1592 return dev_err_probe(dev, ret, "failed to lookup phy %s\n",
1593 phy_name);
1594 }
1595 }
1596
1597 return 0;
1598 }
1599
1600 static int dwc3_core_init_mode(struct dwc3 *dwc)
1601 {
1602 struct device *dev = dwc->dev;
1603 int ret;
1604 int i;
1605
1606 switch (dwc->dr_mode) {
1607 case USB_DR_MODE_PERIPHERAL:
1608 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);
1609
1610 if (dwc->usb2_phy)
1611 otg_set_vbus(dwc->usb2_phy->otg, false);
1612 phy_set_mode(dwc->usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
1613 phy_set_mode(dwc->usb3_generic_phy[0], PHY_MODE_USB_DEVICE);
1614
1615 ret = dwc3_gadget_init(dwc);
1616 if (ret)
1617 return dev_err_probe(dev, ret, "failed to initialize gadget\n");
1618 break;
1619 case USB_DR_MODE_HOST:
1620 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);
1621
1622 if (dwc->usb2_phy)
1623 otg_set_vbus(dwc->usb2_phy->otg, true);
1624 for (i = 0; i < dwc->num_usb2_ports; i++)
1625 phy_set_mode(dwc->usb2_generic_phy[i], PHY_MODE_USB_HOST);
1626 for (i = 0; i < dwc->num_usb3_ports; i++)
1627 phy_set_mode(dwc->usb3_generic_phy[i], PHY_MODE_USB_HOST);
1628
1629 ret = dwc3_host_init(dwc);
1630 if (ret)
1631 return dev_err_probe(dev, ret, "failed to initialize host\n");
1632 break;
1633 case USB_DR_MODE_OTG:
1634 INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
1635 ret = dwc3_drd_init(dwc);
1636 if (ret)
1637 return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
1638 break;
1639 default:
1640 dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
1641 return -EINVAL;
1642 }
1643
1644 return 0;
1645 }
1646
1647 static void dwc3_core_exit_mode(struct dwc3 *dwc)
1648 {
1649 switch (dwc->dr_mode) {
1650 case USB_DR_MODE_PERIPHERAL:
1651 dwc3_gadget_exit(dwc);
1652 break;
1653 case USB_DR_MODE_HOST:
1654 dwc3_host_exit(dwc);
1655 break;
1656 case USB_DR_MODE_OTG:
1657 dwc3_drd_exit(dwc);
1658 break;
1659 default:
1660 /* do nothing */
1661 break;
1662 }
1663
1664 /* de-assert DRVVBUS for HOST and OTG mode */
1665 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
1666 }
1667
1668 static void dwc3_get_software_properties(struct dwc3 *dwc)
1669 {
1670 struct device *tmpdev;
1671 u16 gsbuscfg0_reqinfo;
1672 int ret;
1673
1674 dwc->gsbuscfg0_reqinfo = DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED;
1675
1676 /*
1677 * Iterate over all parent nodes for finding swnode properties
1678 * and non-DT (non-ABI) properties.
1679 */
1680 for (tmpdev = dwc->dev; tmpdev; tmpdev = tmpdev->parent) {
1681 ret = device_property_read_u16(tmpdev,
1682 "snps,gsbuscfg0-reqinfo",
1683 &gsbuscfg0_reqinfo);
1684 if (!ret)
1685 dwc->gsbuscfg0_reqinfo = gsbuscfg0_reqinfo;
1686 }
1687 }
1688
1689 static void dwc3_get_properties(struct dwc3 *dwc)
1690 {
1691 struct device *dev = dwc->dev;
1692 u8 lpm_nyet_threshold;
1693 u8 tx_de_emphasis;
1694 u8 hird_threshold;
1695 u8 rx_thr_num_pkt = 0;
1696 u8 rx_max_burst = 0;
1697 u8 tx_thr_num_pkt = 0;
1698 u8 tx_max_burst = 0;
1699 u8 rx_thr_num_pkt_prd = 0;
1700 u8 rx_max_burst_prd = 0;
1701 u8 tx_thr_num_pkt_prd = 0;
1702 u8 tx_max_burst_prd = 0;
1703 u8 tx_fifo_resize_max_num;
1704 u16 num_hc_interrupters;
1705
1706 /* default to highest possible threshold */
1707 lpm_nyet_threshold = 0xf;
1708
1709 /* default to -3.5dB de-emphasis */
1710 tx_de_emphasis = 1;
1711
1712 /*
1713 * default to assert utmi_sleep_n and use maximum allowed HIRD
1714 * threshold value of 0b1100
1715 */
1716 hird_threshold = 12;
1717
1718 /*
1719 * default to a TXFIFO size large enough to fit 6 max packets. This
1720 * allows for systems with larger bus latencies to have some headroom
1721 * for endpoints that have a large bMaxBurst value.
1722 */
1723 tx_fifo_resize_max_num = 6;
1724
1725 /* default to a single XHCI interrupter */
1726 num_hc_interrupters = 1;
1727
1728 dwc->maximum_speed = usb_get_maximum_speed(dev);
1729 dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
1730 dwc->dr_mode = usb_get_dr_mode(dev);
1731 dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
1732
1733 dwc->sysdev_is_parent = device_property_read_bool(dev,
1734 "linux,sysdev_is_parent");
1735 if (dwc->sysdev_is_parent)
1736 dwc->sysdev = dwc->dev->parent;
1737 else
1738 dwc->sysdev = dwc->dev;
1739
1740 dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
1741
1742 dwc->has_lpm_erratum = device_property_read_bool(dev,
1743 "snps,has-lpm-erratum");
1744 device_property_read_u8(dev, "snps,lpm-nyet-threshold",
1745 &lpm_nyet_threshold);
1746 dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
1747 "snps,is-utmi-l1-suspend");
1748 device_property_read_u8(dev, "snps,hird-threshold",
1749 &hird_threshold);
1750 dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
1751 "snps,dis-start-transfer-quirk");
1752 dwc->usb3_lpm_capable = device_property_read_bool(dev,
1753 "snps,usb3_lpm_capable");
1754 dwc->usb2_lpm_disable = device_property_read_bool(dev,
1755 "snps,usb2-lpm-disable");
1756 dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
1757 "snps,usb2-gadget-lpm-disable");
1758 device_property_read_u8(dev, "snps,rx-thr-num-pkt",
1759 &rx_thr_num_pkt);
1760 device_property_read_u8(dev, "snps,rx-max-burst",
1761 &rx_max_burst);
1762 device_property_read_u8(dev, "snps,tx-thr-num-pkt",
1763 &tx_thr_num_pkt);
1764 device_property_read_u8(dev, "snps,tx-max-burst",
1765 &tx_max_burst);
1766 device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
1767 &rx_thr_num_pkt_prd);
1768 device_property_read_u8(dev, "snps,rx-max-burst-prd",
1769 &rx_max_burst_prd);
1770 device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
1771 &tx_thr_num_pkt_prd);
1772 device_property_read_u8(dev, "snps,tx-max-burst-prd",
1773 &tx_max_burst_prd);
1774 device_property_read_u16(dev, "num-hc-interrupters",
1775 &num_hc_interrupters);
1776 /* The DWC3 core allows a maximum of 8 interrupters */
1777 if (num_hc_interrupters > 8)
1778 num_hc_interrupters = 8;
1779
1780 dwc->do_fifo_resize = device_property_read_bool(dev,
1781 "tx-fifo-resize");
1782 if (dwc->do_fifo_resize)
1783 device_property_read_u8(dev, "tx-fifo-max-num",
1784 &tx_fifo_resize_max_num);
1785
1786 dwc->disable_scramble_quirk = device_property_read_bool(dev,
1787 "snps,disable_scramble_quirk");
1788 dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
1789 "snps,u2exit_lfps_quirk");
1790 dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
1791 "snps,u2ss_inp3_quirk");
1792 dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
1793 "snps,req_p1p2p3_quirk");
1794 dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
1795 "snps,del_p1p2p3_quirk");
1796 dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
1797 "snps,del_phy_power_chg_quirk");
1798 dwc->lfps_filter_quirk = device_property_read_bool(dev,
1799 "snps,lfps_filter_quirk");
1800 dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
1801 "snps,rx_detect_poll_quirk");
1802 dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
1803 "snps,dis_u3_susphy_quirk");
1804 dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
1805 "snps,dis_u2_susphy_quirk");
1806 dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
1807 "snps,dis_enblslpm_quirk");
1808 dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
1809 "snps,dis-u1-entry-quirk");
1810 dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
1811 "snps,dis-u2-entry-quirk");
1812 dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
1813 "snps,dis_rxdet_inp3_quirk");
1814 dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
1815 "snps,dis-u2-freeclk-exists-quirk");
1816 dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
1817 "snps,dis-del-phy-power-chg-quirk");
1818 dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
1819 "snps,dis-tx-ipgap-linecheck-quirk");
1820 dwc->resume_hs_terminations = device_property_read_bool(dev,
1821 "snps,resume-hs-terminations");
1822 dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev,
1823 "snps,ulpi-ext-vbus-drv");
1824 dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
1825 "snps,parkmode-disable-ss-quirk");
1826 dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev,
1827 "snps,parkmode-disable-hs-quirk");
1828 dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
1829 "snps,gfladj-refclk-lpm-sel-quirk");
1830
1831 dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
1832 "snps,tx_de_emphasis_quirk");
1833 device_property_read_u8(dev, "snps,tx_de_emphasis",
1834 &tx_de_emphasis);
1835 device_property_read_string(dev, "snps,hsphy_interface",
1836 &dwc->hsphy_interface);
1837 device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
1838 &dwc->fladj);
1839 device_property_read_u32(dev, "snps,ref-clock-period-ns",
1840 &dwc->ref_clk_per);
1841
1842 dwc->dis_metastability_quirk = device_property_read_bool(dev,
1843 "snps,dis_metastability_quirk");
1844
1845 dwc->dis_split_quirk = device_property_read_bool(dev,
1846 "snps,dis-split-quirk");
1847
1848 dwc->lpm_nyet_threshold = lpm_nyet_threshold;
1849 dwc->tx_de_emphasis = tx_de_emphasis;
1850
1851 dwc->hird_threshold = hird_threshold;
1852
1853 dwc->rx_thr_num_pkt = rx_thr_num_pkt;
1854 dwc->rx_max_burst = rx_max_burst;
1855
1856 dwc->tx_thr_num_pkt = tx_thr_num_pkt;
1857 dwc->tx_max_burst = tx_max_burst;
1858
1859 dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
1860 dwc->rx_max_burst_prd = rx_max_burst_prd;
1861
1862 dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
1863 dwc->tx_max_burst_prd = tx_max_burst_prd;
1864
1865 dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
1866
1867 dwc->num_hc_interrupters = num_hc_interrupters;
1868 }
1869
1870 /* check whether the core supports IMOD */
1871 bool dwc3_has_imod(struct dwc3 *dwc)
1872 {
1873 return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
1874 DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
1875 DWC3_IP_IS(DWC32);
1876 }
1877
1878 static void dwc3_check_params(struct dwc3 *dwc)
1879 {
1880 struct device *dev = dwc->dev;
1881 unsigned int hwparam_gen =
1882 DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
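	/*
	 * SSPHY_IFC reports the SuperSpeed PHY interface the core was
	 * synthesized with: disabled, Gen 1, or Gen 2.
	 */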
1883
1884 /*
1885 * Enable IMOD for all supporting controllers.
1886 *
1887 * Particularly, DWC_usb3 v3.00a must enable this feature for
1888 * the following reason:
1889 *
1890 * Workaround for STAR 9000961433 which affects only version
1891 * 3.00a of the DWC_usb3 core. This prevents the controller
1892 * interrupt from being masked while handling events. IMOD
1893 * allows us to work around this issue. Enable it for the
1894 * affected version.
1895 */
1896 if (dwc3_has_imod(dwc))
1897 dwc->imod_interval = 1;
1898
1899 /* Check the maximum_speed parameter */
1900 switch (dwc->maximum_speed) {
1901 case USB_SPEED_FULL:
1902 case USB_SPEED_HIGH:
1903 break;
1904 case USB_SPEED_SUPER:
1905 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
1906 dev_warn(dev, "UDC doesn't support Gen 1\n");
1907 break;
1908 case USB_SPEED_SUPER_PLUS:
1909 if ((DWC3_IP_IS(DWC32) &&
1910 hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
1911 (!DWC3_IP_IS(DWC32) &&
1912 hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
1913 dev_warn(dev, "UDC doesn't support SSP\n");
1914 break;
1915 default:
1916 dev_err(dev, "invalid maximum_speed parameter %d\n",
1917 dwc->maximum_speed);
1918 fallthrough;
1919 case USB_SPEED_UNKNOWN:
1920 switch (hwparam_gen) {
1921 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1922 dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1923 break;
1924 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
1925 if (DWC3_IP_IS(DWC32))
1926 dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1927 else
1928 dwc->maximum_speed = USB_SPEED_SUPER;
1929 break;
1930 case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
1931 dwc->maximum_speed = USB_SPEED_HIGH;
1932 break;
1933 default:
1934 dwc->maximum_speed = USB_SPEED_SUPER;
1935 break;
1936 }
1937 break;
1938 }
1939
1940 /*
1941 * Currently the controller does not have visibility into the HW
1942 * parameter to determine the maximum number of lanes the HW supports.
1943 * If the number of lanes is not specified in the device property, then
1944 * set the default to support dual-lane for DWC_usb32 and single-lane
1945 * for DWC_usb31 for super-speed-plus.
1946 */
1947 if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
1948 switch (dwc->max_ssp_rate) {
1949 case USB_SSP_GEN_2x1:
1950 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
1951 dev_warn(dev, "UDC only supports Gen 1\n");
1952 break;
1953 case USB_SSP_GEN_1x2:
1954 case USB_SSP_GEN_2x2:
1955 if (DWC3_IP_IS(DWC31))
1956 dev_warn(dev, "UDC only supports single lane\n");
1957 break;
1958 case USB_SSP_GEN_UNKNOWN:
1959 default:
1960 switch (hwparam_gen) {
1961 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1962 if (DWC3_IP_IS(DWC32))
1963 dwc->max_ssp_rate = USB_SSP_GEN_2x2;
1964 else
1965 dwc->max_ssp_rate = USB_SSP_GEN_2x1;
1966 break;
1967 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
1968 if (DWC3_IP_IS(DWC32))
1969 dwc->max_ssp_rate = USB_SSP_GEN_1x2;
1970 break;
1971 }
1972 break;
1973 }
1974 }
1975 }
1976
1977 static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
1978 {
1979 struct device *dev = dwc->dev;
1980 struct device_node *np_phy;
1981 struct extcon_dev *edev = NULL;
1982 const char *name;
1983
1984 if (device_property_present(dev, "extcon"))
1985 return extcon_get_edev_by_phandle(dev, 0);
1986
1987 /*
1988 * Device tree platforms should get extcon via phandle.
1989 * On ACPI platforms, we get the name from a device property.
1990 * This device property is for kernel internal use only and
1991 * is expected to be set by the glue code.
1992 */
1993 if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
1994 return extcon_get_extcon_dev(name);
1995
1996 /*
1997 * Check explicitly if "usb-role-switch" is used since
1998 * extcon_find_edev_by_node() cannot be used to check the absence of
1999 * an extcon device: in the absence of a device it will always return
2000 * EPROBE_DEFER.
2001 */
2002 if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
2003 device_property_read_bool(dev, "usb-role-switch"))
2004 return NULL;
2005
2006 /*
2007 * Try to get an extcon device from the USB PHY controller's "port"
2008 * node. Check if it has the "port" node first, to avoid printing the
2009 * error message from underlying code, as it's a valid case: extcon
2010 * device (and "port" node) may be missing in case of "usb-role-switch"
2011 * or OTG mode.
2012 */
2013 np_phy = of_parse_phandle(dev->of_node, "phys", 0);
2014 if (of_graph_is_present(np_phy)) {
2015 struct device_node *np_conn;
2016
2017 np_conn = of_graph_get_remote_node(np_phy, -1, -1);
2018 if (np_conn)
2019 edev = extcon_find_edev_by_node(np_conn);
2020 of_node_put(np_conn);
2021 }
2022 of_node_put(np_phy);
2023
2024 return edev;
2025 }
2026
2027 static int dwc3_get_clocks(struct dwc3 *dwc)
2028 {
2029 struct device *dev = dwc->dev;
2030
2031 if (!dev->of_node)
2032 return 0;
2033
2034 /*
2035 * Clocks are optional, but new DT platforms should support all clocks
2036 * as required by the DT-binding.
2037 * Some devices use different clock names in legacy device trees;
2038 * check for those to retain backwards compatibility.
2039 */
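	/*
	 * Illustrative clock-names, matching the lookups below: current
	 * bindings use "bus_early", "ref" and "suspend", while legacy device
	 * trees may use "bus_clk", "ref_clk" and "suspend_clk".
	 */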
2040 dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
2041 if (IS_ERR(dwc->bus_clk)) {
2042 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
2043 "could not get bus clock\n");
2044 }
2045
2046 if (dwc->bus_clk == NULL) {
2047 dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
2048 if (IS_ERR(dwc->bus_clk)) {
2049 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
2050 "could not get bus clock\n");
2051 }
2052 }
2053
2054 dwc->ref_clk = devm_clk_get_optional(dev, "ref");
2055 if (IS_ERR(dwc->ref_clk)) {
2056 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
2057 "could not get ref clock\n");
2058 }
2059
2060 if (dwc->ref_clk == NULL) {
2061 dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
2062 if (IS_ERR(dwc->ref_clk)) {
2063 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
2064 "could not get ref clock\n");
2065 }
2066 }
2067
2068 dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
2069 if (IS_ERR(dwc->susp_clk)) {
2070 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
2071 "could not get suspend clock\n");
2072 }
2073
2074 if (dwc->susp_clk == NULL) {
2075 dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
2076 if (IS_ERR(dwc->susp_clk)) {
2077 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
2078 "could not get suspend clock\n");
2079 }
2080 }
2081
2082 /* specific to Rockchip RK3588 */
2083 dwc->utmi_clk = devm_clk_get_optional(dev, "utmi");
2084 if (IS_ERR(dwc->utmi_clk)) {
2085 return dev_err_probe(dev, PTR_ERR(dwc->utmi_clk),
2086 "could not get utmi clock\n");
2087 }
2088
2089 /* specific to Rockchip RK3588 */
2090 dwc->pipe_clk = devm_clk_get_optional(dev, "pipe");
2091 if (IS_ERR(dwc->pipe_clk)) {
2092 return dev_err_probe(dev, PTR_ERR(dwc->pipe_clk),
2093 "could not get pipe clock\n");
2094 }
2095
2096 return 0;
2097 }
2098
2099 static int dwc3_get_num_ports(struct dwc3 *dwc)
2100 {
2101 void __iomem *base;
2102 u8 major_revision;
2103 u32 offset;
2104 u32 val;
2105
2106 /*
2107 * Remap the xHCI address space to access the xHCI extended capability
2108 * registers, which are needed to determine the number of ports present.
2109 */
2110 base = ioremap(dwc->xhci_resources[0].start,
2111 resource_size(&dwc->xhci_resources[0]));
2112 if (!base)
2113 return -ENOMEM;
2114
2115 offset = 0;
2116 do {
2117 offset = xhci_find_next_ext_cap(base, offset,
2118 XHCI_EXT_CAPS_PROTOCOL);
2119 if (!offset)
2120 break;
2121
2122 val = readl(base + offset);
2123 major_revision = XHCI_EXT_PORT_MAJOR(val);
2124
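		/*
		 * Per the xHCI Supported Protocol Capability layout, major
		 * revision 0x03 describes USB 3.x ports and 0x02 or lower
		 * USB 2.0 ports; the port count is in the third capability
		 * dword read below.
		 */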
2125 val = readl(base + offset + 0x08);
2126 if (major_revision == 0x03) {
2127 dwc->num_usb3_ports += XHCI_EXT_PORT_COUNT(val);
2128 } else if (major_revision <= 0x02) {
2129 dwc->num_usb2_ports += XHCI_EXT_PORT_COUNT(val);
2130 } else {
2131 dev_warn(dwc->dev, "unrecognized port major revision %d\n",
2132 major_revision);
2133 }
2134 } while (1);
2135
2136 dev_dbg(dwc->dev, "hs-ports: %u ss-ports: %u\n",
2137 dwc->num_usb2_ports, dwc->num_usb3_ports);
2138
2139 iounmap(base);
2140
2141 if (dwc->num_usb2_ports > DWC3_USB2_MAX_PORTS ||
2142 dwc->num_usb3_ports > DWC3_USB3_MAX_PORTS)
2143 return -EINVAL;
2144
2145 return 0;
2146 }
2147
2148 static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
2149 {
2150 struct power_supply *usb_psy;
2151 const char *usb_psy_name;
2152 int ret;
2153
2154 ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
2155 if (ret < 0)
2156 return NULL;
2157
2158 usb_psy = power_supply_get_by_name(usb_psy_name);
2159 if (!usb_psy)
2160 return ERR_PTR(-EPROBE_DEFER);
2161
2162 return usb_psy;
2163 }
2164
2165 int dwc3_core_probe(const struct dwc3_probe_data *data)
2166 {
2167 struct dwc3 *dwc = data->dwc;
2168 struct device *dev = dwc->dev;
2169 struct resource dwc_res;
2170 unsigned int hw_mode;
2171 void __iomem *regs;
2172 struct resource *res = data->res;
2173 int ret;
2174
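	/*
	 * The xHCI register window sits at the start of the controller's MMIO
	 * region; record it separately so it can later be handed to the
	 * xhci-plat child device.
	 */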
2175 dwc->xhci_resources[0].start = res->start;
2176 dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
2177 DWC3_XHCI_REGS_END;
2178 dwc->xhci_resources[0].flags = res->flags;
2179 dwc->xhci_resources[0].name = res->name;
2180
2181 /*
2182 * Request the memory region but exclude the xHCI registers,
2183 * since that region will be requested by the xhci-plat driver.
2184 */
2185 dwc_res = *res;
2186 dwc_res.start += DWC3_GLOBALS_REGS_START;
2187
2188 if (dev->of_node) {
2189 struct device_node *parent = of_get_parent(dev->of_node);
2190
2191 if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
2192 dwc_res.start -= DWC3_GLOBALS_REGS_START;
2193 dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
2194 }
2195
2196 of_node_put(parent);
2197 }
2198
2199 regs = devm_ioremap_resource(dev, &dwc_res);
2200 if (IS_ERR(regs))
2201 return PTR_ERR(regs);
2202
2203 dwc->regs = regs;
2204 dwc->regs_size = resource_size(&dwc_res);
2205
2206 dwc3_get_properties(dwc);
2207
2208 dwc3_get_software_properties(dwc);
2209
2210 dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
2211 if (IS_ERR(dwc->usb_psy))
2212 return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");
2213
2214 if (!data->ignore_clocks_and_resets) {
2215 dwc->reset = devm_reset_control_array_get_optional_shared(dev);
2216 if (IS_ERR(dwc->reset)) {
2217 ret = PTR_ERR(dwc->reset);
2218 goto err_put_psy;
2219 }
2220
2221 ret = dwc3_get_clocks(dwc);
2222 if (ret)
2223 goto err_put_psy;
2224 }
2225
2226 ret = reset_control_deassert(dwc->reset);
2227 if (ret)
2228 goto err_put_psy;
2229
2230 ret = dwc3_clk_enable(dwc);
2231 if (ret)
2232 goto err_assert_reset;
2233
2234 if (!dwc3_core_is_valid(dwc)) {
2235 dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
2236 ret = -ENODEV;
2237 goto err_disable_clks;
2238 }
2239
2240 dev_set_drvdata(dev, dwc);
2241 dwc3_cache_hwparams(dwc);
2242
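	/*
	 * GHWPARAMS0.AWIDTH reports the bus address width the core was
	 * configured with; only advertise 64-bit DMA when the hardware can
	 * actually drive 64-bit addresses.
	 */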
2243 if (!dwc->sysdev_is_parent &&
2244 DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
2245 ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
2246 if (ret)
2247 goto err_disable_clks;
2248 }
2249
2250 /*
2251 * Currently only DWC3 controllers that are host-only capable
2252 * can have more than one port.
2253 */
2254 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
2255 if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
2256 ret = dwc3_get_num_ports(dwc);
2257 if (ret)
2258 goto err_disable_clks;
2259 } else {
2260 dwc->num_usb2_ports = 1;
2261 dwc->num_usb3_ports = 1;
2262 }
2263
2264 spin_lock_init(&dwc->lock);
2265 mutex_init(&dwc->mutex);
2266
2267 pm_runtime_get_noresume(dev);
2268 pm_runtime_set_active(dev);
2269 pm_runtime_use_autosuspend(dev);
2270 pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
2271 pm_runtime_enable(dev);
2272
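	/*
	 * Block runtime suspend by default; it only takes effect once
	 * userspace or a glue driver explicitly allows it again via
	 * pm_runtime_allow().
	 */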
2273 pm_runtime_forbid(dev);
2274
2275 ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
2276 if (ret) {
2277 dev_err(dwc->dev, "failed to allocate event buffers\n");
2278 ret = -ENOMEM;
2279 goto err_allow_rpm;
2280 }
2281
2282 dwc->edev = dwc3_get_extcon(dwc);
2283 if (IS_ERR(dwc->edev)) {
2284 ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
2285 goto err_free_event_buffers;
2286 }
2287
2288 ret = dwc3_get_dr_mode(dwc);
2289 if (ret)
2290 goto err_free_event_buffers;
2291
2292 ret = dwc3_core_init(dwc);
2293 if (ret) {
2294 dev_err_probe(dev, ret, "failed to initialize core\n");
2295 goto err_free_event_buffers;
2296 }
2297
2298 dwc3_check_params(dwc);
2299 dwc3_debugfs_init(dwc);
2300
2301 ret = dwc3_core_init_mode(dwc);
2302 if (ret)
2303 goto err_exit_debugfs;
2304
2305 pm_runtime_put(dev);
2306
2307 dma_set_max_seg_size(dev, UINT_MAX);
2308
2309 return 0;
2310
2311 err_exit_debugfs:
2312 dwc3_debugfs_exit(dwc);
2313 dwc3_event_buffers_cleanup(dwc);
2314 dwc3_phy_power_off(dwc);
2315 dwc3_phy_exit(dwc);
2316 dwc3_ulpi_exit(dwc);
2317 err_free_event_buffers:
2318 dwc3_free_event_buffers(dwc);
2319 err_allow_rpm:
2320 pm_runtime_allow(dev);
2321 pm_runtime_disable(dev);
2322 pm_runtime_dont_use_autosuspend(dev);
2323 pm_runtime_set_suspended(dev);
2324 pm_runtime_put_noidle(dev);
2325 err_disable_clks:
2326 dwc3_clk_disable(dwc);
2327 err_assert_reset:
2328 reset_control_assert(dwc->reset);
2329 err_put_psy:
2330 if (dwc->usb_psy)
2331 power_supply_put(dwc->usb_psy);
2332
2333 return ret;
2334 }
2335 EXPORT_SYMBOL_GPL(dwc3_core_probe);
2336
2337 static int dwc3_probe(struct platform_device *pdev)
2338 {
2339 struct dwc3_probe_data probe_data = {};
2340 struct resource *res;
2341 struct dwc3 *dwc;
2342
2343 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2344 if (!res) {
2345 dev_err(&pdev->dev, "missing memory resource\n");
2346 return -ENODEV;
2347 }
2348
2349 dwc = devm_kzalloc(&pdev->dev, sizeof(*dwc), GFP_KERNEL);
2350 if (!dwc)
2351 return -ENOMEM;
2352
2353 dwc->dev = &pdev->dev;
2354
2355 probe_data.dwc = dwc;
2356 probe_data.res = res;
2357
2358 return dwc3_core_probe(&probe_data);
2359 }
2360
2361 void dwc3_core_remove(struct dwc3 *dwc)
2362 {
2363 pm_runtime_get_sync(dwc->dev);
2364
2365 dwc3_core_exit_mode(dwc);
2366 dwc3_debugfs_exit(dwc);
2367
2368 dwc3_core_exit(dwc);
2369 dwc3_ulpi_exit(dwc);
2370
2371 pm_runtime_allow(dwc->dev);
2372 pm_runtime_disable(dwc->dev);
2373 pm_runtime_dont_use_autosuspend(dwc->dev);
2374 pm_runtime_put_noidle(dwc->dev);
2375 /*
2376 * HACK: Clear the driver data, which is currently accessed by parent
2377 * glue drivers, before allowing the parent to suspend.
2378 */
2379 dev_set_drvdata(dwc->dev, NULL);
2380 pm_runtime_set_suspended(dwc->dev);
2381
2382 dwc3_free_event_buffers(dwc);
2383
2384 if (dwc->usb_psy)
2385 power_supply_put(dwc->usb_psy);
2386 }
2387 EXPORT_SYMBOL_GPL(dwc3_core_remove);
2388
2389 static void dwc3_remove(struct platform_device *pdev)
2390 {
2391 dwc3_core_remove(platform_get_drvdata(pdev));
2392 }
2393
2394 #ifdef CONFIG_PM
2395 static int dwc3_core_init_for_resume(struct dwc3 *dwc)
2396 {
2397 int ret;
2398
2399 ret = reset_control_deassert(dwc->reset);
2400 if (ret)
2401 return ret;
2402
2403 ret = dwc3_clk_enable(dwc);
2404 if (ret)
2405 goto assert_reset;
2406
2407 ret = dwc3_core_init(dwc);
2408 if (ret)
2409 goto disable_clks;
2410
2411 return 0;
2412
2413 disable_clks:
2414 dwc3_clk_disable(dwc);
2415 assert_reset:
2416 reset_control_assert(dwc->reset);
2417
2418 return ret;
2419 }
2420
2421 static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
2422 {
2423 u32 reg;
2424 int i;
2425
2426 if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) {
2427 dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
2428 DWC3_GUSB2PHYCFG_SUSPHY) ||
2429 (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
2430 DWC3_GUSB3PIPECTL_SUSPHY);
2431 /*
2432 * TI AM62 platform requires SUSPHY to be
2433 * enabled for system suspend to work.
2434 */
2435 if (!dwc->susphy_state)
2436 dwc3_enable_susphy(dwc, true);
2437 }
2438
2439 switch (dwc->current_dr_role) {
2440 case DWC3_GCTL_PRTCAP_DEVICE:
2441 if (pm_runtime_suspended(dwc->dev))
2442 break;
2443 dwc3_gadget_suspend(dwc);
2444 synchronize_irq(dwc->irq_gadget);
2445 dwc3_core_exit(dwc);
2446 break;
2447 case DWC3_GCTL_PRTCAP_HOST:
2448 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2449 dwc3_core_exit(dwc);
2450 break;
2451 }
2452
2453 /* Let the controller suspend the HSPHY before the PHY driver suspends */
2454 if (dwc->dis_u2_susphy_quirk ||
2455 dwc->dis_enblslpm_quirk) {
2456 for (i = 0; i < dwc->num_usb2_ports; i++) {
2457 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
2458 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
2459 DWC3_GUSB2PHYCFG_SUSPHY;
2460 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
2461 }
2462
2463 /* Give some time for USB2 PHY to suspend */
2464 usleep_range(5000, 6000);
2465 }
2466
2467 for (i = 0; i < dwc->num_usb2_ports; i++)
2468 phy_pm_runtime_put_sync(dwc->usb2_generic_phy[i]);
2469 for (i = 0; i < dwc->num_usb3_ports; i++)
2470 phy_pm_runtime_put_sync(dwc->usb3_generic_phy[i]);
2471 break;
2472 case DWC3_GCTL_PRTCAP_OTG:
2473 /* do nothing during runtime_suspend */
2474 if (PMSG_IS_AUTO(msg))
2475 break;
2476
2477 if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2478 dwc3_gadget_suspend(dwc);
2479 synchronize_irq(dwc->irq_gadget);
2480 }
2481
2482 dwc3_otg_exit(dwc);
2483 dwc3_core_exit(dwc);
2484 break;
2485 default:
2486 /* do nothing */
2487 break;
2488 }
2489
2490 return 0;
2491 }
2492
2493 static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
2494 {
2495 int ret;
2496 u32 reg;
2497 int i;
2498
2499 switch (dwc->current_dr_role) {
2500 case DWC3_GCTL_PRTCAP_DEVICE:
2501 ret = dwc3_core_init_for_resume(dwc);
2502 if (ret)
2503 return ret;
2504
2505 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
2506 dwc3_gadget_resume(dwc);
2507 break;
2508 case DWC3_GCTL_PRTCAP_HOST:
2509 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2510 ret = dwc3_core_init_for_resume(dwc);
2511 if (ret)
2512 return ret;
2513 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true);
2514 break;
2515 }
2516 /* Restore GUSB2PHYCFG bits that were modified in suspend */
2517 for (i = 0; i < dwc->num_usb2_ports; i++) {
2518 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(i));
2519 if (dwc->dis_u2_susphy_quirk)
2520 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
2521
2522 if (dwc->dis_enblslpm_quirk)
2523 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
2524
2525 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
2526 }
2527
2528 for (i = 0; i < dwc->num_usb2_ports; i++)
2529 phy_pm_runtime_get_sync(dwc->usb2_generic_phy[i]);
2530 for (i = 0; i < dwc->num_usb3_ports; i++)
2531 phy_pm_runtime_get_sync(dwc->usb3_generic_phy[i]);
2532 break;
2533 case DWC3_GCTL_PRTCAP_OTG:
2534 /* nothing to do on runtime_resume */
2535 if (PMSG_IS_AUTO(msg))
2536 break;
2537
2538 ret = dwc3_core_init_for_resume(dwc);
2539 if (ret)
2540 return ret;
2541
2542 dwc3_set_prtcap(dwc, dwc->current_dr_role, true);
2543
2544 dwc3_otg_init(dwc);
2545 if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
2546 dwc3_otg_host_init(dwc);
2547 } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2548 dwc3_gadget_resume(dwc);
2549 }
2550
2551 break;
2552 default:
2553 /* do nothing */
2554 break;
2555 }
2556
2557 if (!PMSG_IS_AUTO(msg)) {
2558 /* Restore the SUSPHY state to what it was before system suspend. */
2559 dwc3_enable_susphy(dwc, dwc->susphy_state);
2560 }
2561
2562 return 0;
2563 }
2564
2565 static int dwc3_runtime_checks(struct dwc3 *dwc)
2566 {
2567 switch (dwc->current_dr_role) {
2568 case DWC3_GCTL_PRTCAP_DEVICE:
2569 if (dwc->connected)
2570 return -EBUSY;
2571 break;
2572 case DWC3_GCTL_PRTCAP_HOST:
2573 default:
2574 /* do nothing */
2575 break;
2576 }
2577
2578 return 0;
2579 }
2580
2581 int dwc3_runtime_suspend(struct dwc3 *dwc)
2582 {
2583 int ret;
2584
2585 if (dwc3_runtime_checks(dwc))
2586 return -EBUSY;
2587
2588 ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
2589 if (ret)
2590 return ret;
2591
2592 return 0;
2593 }
2594 EXPORT_SYMBOL_GPL(dwc3_runtime_suspend);
2595
2596 int dwc3_runtime_resume(struct dwc3 *dwc)
2597 {
2598 struct device *dev = dwc->dev;
2599 int ret;
2600
2601 ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
2602 if (ret)
2603 return ret;
2604
2605 switch (dwc->current_dr_role) {
2606 case DWC3_GCTL_PRTCAP_DEVICE:
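		/*
		 * If an event interrupt fired while the controller was
		 * runtime-suspended, the IRQ handler took a PM reference,
		 * masked the IRQ and set pending_events; balance that
		 * reference and unmask the IRQ now that the core is back up.
		 */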
2607 if (dwc->pending_events) {
2608 pm_runtime_put(dev);
2609 dwc->pending_events = false;
2610 enable_irq(dwc->irq_gadget);
2611 }
2612 break;
2613 case DWC3_GCTL_PRTCAP_HOST:
2614 default:
2615 /* do nothing */
2616 break;
2617 }
2618
2619 pm_runtime_mark_last_busy(dev);
2620
2621 return 0;
2622 }
2623 EXPORT_SYMBOL_GPL(dwc3_runtime_resume);
2624
2625 int dwc3_runtime_idle(struct dwc3 *dwc)
2626 {
2627 struct device *dev = dwc->dev;
2628
2629 switch (dwc->current_dr_role) {
2630 case DWC3_GCTL_PRTCAP_DEVICE:
2631 if (dwc3_runtime_checks(dwc))
2632 return -EBUSY;
2633 break;
2634 case DWC3_GCTL_PRTCAP_HOST:
2635 default:
2636 /* do nothing */
2637 break;
2638 }
2639
2640 pm_runtime_mark_last_busy(dev);
2641 pm_runtime_autosuspend(dev);
2642
2643 return 0;
2644 }
2645 EXPORT_SYMBOL_GPL(dwc3_runtime_idle);
2646
2647 static int dwc3_plat_runtime_suspend(struct device *dev)
2648 {
2649 return dwc3_runtime_suspend(dev_get_drvdata(dev));
2650 }
2651
2652 static int dwc3_plat_runtime_resume(struct device *dev)
2653 {
2654 return dwc3_runtime_resume(dev_get_drvdata(dev));
2655 }
2656
2657 static int dwc3_plat_runtime_idle(struct device *dev)
2658 {
2659 return dwc3_runtime_idle(dev_get_drvdata(dev));
2660 }
2661 #endif /* CONFIG_PM */
2662
2663 #ifdef CONFIG_PM_SLEEP
2664 int dwc3_pm_suspend(struct dwc3 *dwc)
2665 {
2666 struct device *dev = dwc->dev;
2667 int ret;
2668
2669 ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
2670 if (ret)
2671 return ret;
2672
2673 pinctrl_pm_select_sleep_state(dev);
2674
2675 return 0;
2676 }
2677 EXPORT_SYMBOL_GPL(dwc3_pm_suspend);
2678
2679 int dwc3_pm_resume(struct dwc3 *dwc)
2680 {
2681 struct device *dev = dwc->dev;
2682 int ret = 0;
2683
2684 pinctrl_pm_select_default_state(dev);
2685
2686 pm_runtime_disable(dev);
2687 ret = pm_runtime_set_active(dev);
2688 if (ret)
2689 goto out;
2690
2691 ret = dwc3_resume_common(dwc, PMSG_RESUME);
2692 if (ret)
2693 pm_runtime_set_suspended(dev);
2694
2695 out:
2696 pm_runtime_enable(dev);
2697
2698 return ret;
2699 }
2700 EXPORT_SYMBOL_GPL(dwc3_pm_resume);
2701
2702 void dwc3_pm_complete(struct dwc3 *dwc)
2703 {
2704 u32 reg;
2705
2706 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
2707 dwc->dis_split_quirk) {
2708 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
2709 reg |= DWC3_GUCTL3_SPLITDISABLE;
2710 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
2711 }
2712 }
2713 EXPORT_SYMBOL_GPL(dwc3_pm_complete);
2714
2715 int dwc3_pm_prepare(struct dwc3 *dwc)
2716 {
2717 struct device *dev = dwc->dev;
2718
2719 /*
2720 * Indicate to the PM core that it may safely leave the device in
2721 * runtime suspend if it is already runtime-suspended in device mode.
2722 */
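	/*
	 * A positive return from .prepare() opts the device into the PM
	 * core's direct-complete path, so its suspend/resume callbacks are
	 * skipped for this system sleep cycle.
	 */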
2723 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE &&
2724 pm_runtime_suspended(dev) &&
2725 !dev_pinctrl(dev))
2726 return 1;
2727
2728 return 0;
2729 }
2730 EXPORT_SYMBOL_GPL(dwc3_pm_prepare);
2731
2732 static int dwc3_plat_suspend(struct device *dev)
2733 {
2734 return dwc3_pm_suspend(dev_get_drvdata(dev));
2735 }
2736
2737 static int dwc3_plat_resume(struct device *dev)
2738 {
2739 return dwc3_pm_resume(dev_get_drvdata(dev));
2740 }
2741
2742 static void dwc3_plat_complete(struct device *dev)
2743 {
2744 dwc3_pm_complete(dev_get_drvdata(dev));
2745 }
2746
2747 static int dwc3_plat_prepare(struct device *dev)
2748 {
2749 return dwc3_pm_prepare(dev_get_drvdata(dev));
2750 }
2751 #else
2752 #define dwc3_plat_complete NULL
2753 #define dwc3_plat_prepare NULL
2754 #endif /* CONFIG_PM_SLEEP */
2755
2756 static const struct dev_pm_ops dwc3_dev_pm_ops = {
2757 SET_SYSTEM_SLEEP_PM_OPS(dwc3_plat_suspend, dwc3_plat_resume)
2758 .complete = dwc3_plat_complete,
2759 .prepare = dwc3_plat_prepare,
2760 /*
2761 * Runtime suspend halts the controller on disconnection. It relies on
2762 * platforms with custom connection notification to start the controller
2763 * again.
2764 */
2765 SET_RUNTIME_PM_OPS(dwc3_plat_runtime_suspend, dwc3_plat_runtime_resume,
2766 dwc3_plat_runtime_idle)
2767 };
2768
2769 #ifdef CONFIG_OF
2770 static const struct of_device_id of_dwc3_match[] = {
2771 {
2772 .compatible = "snps,dwc3"
2773 },
2774 {
2775 .compatible = "synopsys,dwc3"
2776 },
2777 { },
2778 };
2779 MODULE_DEVICE_TABLE(of, of_dwc3_match);
2780 #endif
2781
2782 #ifdef CONFIG_ACPI
2783
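/* Intel Braswell (BSW) integrated DWC3, matched by its ACPI _HID */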
2784 #define ACPI_ID_INTEL_BSW "808622B7"
2785
2786 static const struct acpi_device_id dwc3_acpi_match[] = {
2787 { ACPI_ID_INTEL_BSW, 0 },
2788 { },
2789 };
2790 MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
2791 #endif
2792
2793 static struct platform_driver dwc3_driver = {
2794 .probe = dwc3_probe,
2795 .remove = dwc3_remove,
2796 .driver = {
2797 .name = "dwc3",
2798 .of_match_table = of_match_ptr(of_dwc3_match),
2799 .acpi_match_table = ACPI_PTR(dwc3_acpi_match),
2800 .pm = &dwc3_dev_pm_ops,
2801 },
2802 };
2803
2804 module_platform_driver(dwc3_driver);
2805
2806 MODULE_ALIAS("platform:dwc3");
2807 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
2808 MODULE_LICENSE("GPL v2");
2809 MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
2810